From b7847199044e46e0a138586e6f525ef071b3e1f8 Mon Sep 17 00:00:00 2001
From: Zakk Chen
Date: Sun, 13 Feb 2022 17:28:44 -0800
Subject: [PATCH] [RISCV] Add the passthru operand for RVV nomask binary intrinsics.

The goal is to support tail and mask policy in RVV builtins. We focus on
the IR part first. If the passthru operand is undef, we use tail
agnostic; otherwise we use tail undisturbed.

Add a passthru operand for VSLIDE1UP_VL and VSLIDE1DOWN_VL to support
the i64 scalar case on rv32.

Until InsertVSETVLI supports mask agnostic, the masked VSLIDE1 only
emits the mask undisturbed policy, even when mask agnostic is requested.

Reviewed By: craig.topper, rogfer01

Differential Revision: https://reviews.llvm.org/D117989
---
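A minimal sketch, not part of the patch, of what the new operand looks like
from the C side. The helper name add_ta is illustrative; the builtin call and
the quoted IR mirror the updated vaadd tests further down, assuming the usual
<vscale x 8 x i8> type for nxv8i8:

    #include <stddef.h>
    #include <riscv_vector.h>

    // Unmasked builtin: clang now passes an undef passthru as the first
    // intrinsic operand, which selects the tail agnostic policy, e.g.
    //   %0 = call <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.nxv8i8.i64(
    //            <vscale x 8 x i8> undef, <vscale x 8 x i8> %op1,
    //            <vscale x 8 x i8> %op2, i64 %vl)
    vint8m1_t add_ta(vint8m1_t op1, vint8m1_t op2, size_t vl) {
      return vaadd_vv_i8m1(op1, op2, vl);
    }

Passing a real vector rather than undef as the leading operand of the
intrinsic selects tail undisturbed; for now that case is only exercised at
the IR level (see the new llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll),
matching the "IR part first" note above.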
 clang/include/clang/Basic/riscv_vector.td | 45 +-
 .../test/CodeGen/RISCV/riscv-attr-builtin-alias.c | 2 +-
 .../RISCV/rvv-intrinsics-overloaded/vaadd.c | 176 +-
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c | 176 +-
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c | 176 +-
 .../RISCV/rvv-intrinsics-overloaded/vasub.c | 176 +-
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c | 176 +-
 .../RISCV/rvv-intrinsics-overloaded/vfabs.c | 18 +-
 .../RISCV/rvv-intrinsics-overloaded/vfadd.c | 60 +-
 .../RISCV/rvv-intrinsics-overloaded/vfdiv.c | 36 +-
 .../RISCV/rvv-intrinsics-overloaded/vfmax.c | 36 +-
 .../RISCV/rvv-intrinsics-overloaded/vfmin.c | 36 +-
 .../RISCV/rvv-intrinsics-overloaded/vfmul.c | 36 +-
 .../RISCV/rvv-intrinsics-overloaded/vfneg.c | 18 +-
 .../RISCV/rvv-intrinsics-overloaded/vfrdiv.c | 18 +-
 .../RISCV/rvv-intrinsics-overloaded/vfrsub.c | 18 +-
 .../RISCV/rvv-intrinsics-overloaded/vfsgnj.c | 108 +-
 .../RISCV/rvv-intrinsics-overloaded/vfslide1down.c | 18 +-
 .../RISCV/rvv-intrinsics-overloaded/vfslide1up.c | 18 +-
 .../RISCV/rvv-intrinsics-overloaded/vfsub.c | 36 +-
 .../RISCV/rvv-intrinsics-overloaded/vfwadd.c | 32 +-
 .../RISCV/rvv-intrinsics-overloaded/vfwmul.c | 16 +-
 .../RISCV/rvv-intrinsics-overloaded/vfwsub.c | 32 +-
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c | 176 +-
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c | 176 +-
 .../RISCV/rvv-intrinsics-overloaded/vmul-eew64.c | 48 +-
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c | 392 ++--
 .../RISCV/rvv-intrinsics-overloaded/vnclip.c | 120 +-
 .../RISCV/rvv-intrinsics-overloaded/vncvt.c | 60 +-
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vneg.c | 44 +-
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vnot.c | 88 +-
 .../RISCV/rvv-intrinsics-overloaded/vnsra.c | 60 +-
 .../RISCV/rvv-intrinsics-overloaded/vnsrl.c | 60 +-
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c | 176 +-
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c | 176 +-
 .../RISCV/rvv-intrinsics-overloaded/vrgather.c | 314 +--
 .../RISCV/rvv-intrinsics-overloaded/vrsub.c | 88 +-
 .../RISCV/rvv-intrinsics-overloaded/vsadd.c | 176 +-
 .../RISCV/rvv-intrinsics-overloaded/vslide1down.c | 88 +-
 .../RISCV/rvv-intrinsics-overloaded/vslide1up.c | 88 +-
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c | 176 +-
 .../RISCV/rvv-intrinsics-overloaded/vsmul-eew64.c | 16 +-
 .../RISCV/rvv-intrinsics-overloaded/vsmul.c | 72 +-
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c | 88 +-
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c | 88 +-
 .../RISCV/rvv-intrinsics-overloaded/vssra.c | 88 +-
 .../RISCV/rvv-intrinsics-overloaded/vssrl.c | 88 +-
 .../RISCV/rvv-intrinsics-overloaded/vssub.c | 176 +-
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c | 176 +-
 .../RISCV/rvv-intrinsics-overloaded/vwadd.c | 240 +--
 .../RISCV/rvv-intrinsics-overloaded/vwcvt.c | 60 +-
 .../RISCV/rvv-intrinsics-overloaded/vwmul.c | 180 +-
 .../RISCV/rvv-intrinsics-overloaded/vwsub.c | 240 +--
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c | 176 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vaadd.c | 176 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c | 176 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vand.c | 176 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vasub.c | 176 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vdiv.c | 176 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vfabs.c | 30 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c | 60 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vfdiv.c | 60 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vfmax.c | 60 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vfmin.c | 60 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vfmul.c | 60 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vfneg.c | 30 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vfrdiv.c | 30 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsub.c | 30 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vfsgnj.c | 180 +-
 .../CodeGen/RISCV/rvv-intrinsics/vfslide1down.c | 30 +-
 .../test/CodeGen/RISCV/rvv-intrinsics/vfslide1up.c | 30 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vfsub.c | 60 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vfwadd.c | 72 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmul.c | 36 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vfwsub.c | 72 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vmax.c | 176 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vmin.c | 176 +-
 .../test/CodeGen/RISCV/rvv-intrinsics/vmul-eew64.c | 48 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vmul.c | 392 ++--
 clang/test/CodeGen/RISCV/rvv-intrinsics/vnclip.c | 120 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vncvt.c | 60 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vneg.c | 44 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vnot.c | 88 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vnsra.c | 60 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vnsrl.c | 60 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vor.c | 176 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vrem.c | 176 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vrgather.c | 350 ++--
 clang/test/CodeGen/RISCV/rvv-intrinsics/vrsub.c | 88 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vsadd.c | 176 +-
 .../CodeGen/RISCV/rvv-intrinsics/vslide1down.c | 88 +-
 .../test/CodeGen/RISCV/rvv-intrinsics/vslide1up.c | 88 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vsll.c | 176 +-
 .../CodeGen/RISCV/rvv-intrinsics/vsmul-eew64.c | 16 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul.c | 72 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vsra.c | 88 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vsrl.c | 88 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vssra.c | 88 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vssrl.c | 88 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vssub.c | 176 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vsub.c | 176 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vwadd.c | 240 +--
 clang/test/CodeGen/RISCV/rvv-intrinsics/vwcvt.c | 60 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vwmul.c | 180 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vwsub.c | 240 +--
 clang/test/CodeGen/RISCV/rvv-intrinsics/vxor.c | 176 +-
 clang/utils/TableGen/RISCVVEmitter.cpp | 29 +-
 llvm/include/llvm/IR/IntrinsicsRISCV.td | 76 +-
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 66 +-
 llvm/lib/Target/RISCV/RISCVISelLowering.h | 6 +-
 llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td | 185 +-
 llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td | 25 +-
 .../RISCV/rvv/access-fixed-objects-by-rvv.ll | 2 +
 .../CodeGen/RISCV/rvv/masked-vslide1down-rv32.ll | 115 ++
 .../CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll | 6 +-
 .../CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll | 6 +-
 llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll | 2093 +++++++++++++++++++-
 llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll | 88 +
 llvm/test/CodeGen/RISCV/rvv/vaadd-rv64.ll | 88 +
 llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll | 88 +
 llvm/test/CodeGen/RISCV/rvv/vaaddu-rv64.ll | 88 +
 llvm/test/CodeGen/RISCV/rvv/vadd-policy.ll | 2 +
 llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll | 110 +
 llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll | 110 +
 llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll | 110 +
 llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll | 110 +
 llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll | 88 +
 llvm/test/CodeGen/RISCV/rvv/vasub-rv64.ll | 88 +
 llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll | 88 +
 llvm/test/CodeGen/RISCV/rvv/vasubu-rv64.ll | 88 +
 llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll | 88 +
 llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll | 88 +
 llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll | 88 +
 llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll | 88 +
 llvm/test/CodeGen/RISCV/rvv/vfadd.ll | 60 +
 llvm/test/CodeGen/RISCV/rvv/vfdiv.ll | 60 +
 llvm/test/CodeGen/RISCV/rvv/vfmax.ll | 60 +
 llvm/test/CodeGen/RISCV/rvv/vfmin.ll | 60 +
 llvm/test/CodeGen/RISCV/rvv/vfmul.ll | 60 +
 llvm/test/CodeGen/RISCV/rvv/vfrdiv.ll | 30 +
 llvm/test/CodeGen/RISCV/rvv/vfrsub.ll | 30 +
 llvm/test/CodeGen/RISCV/rvv/vfsgnj.ll | 60 +
 llvm/test/CodeGen/RISCV/rvv/vfsgnjn.ll | 60 +
 llvm/test/CodeGen/RISCV/rvv/vfsgnjx.ll | 60 +
 llvm/test/CodeGen/RISCV/rvv/vfslide1down.ll | 30 +
 llvm/test/CodeGen/RISCV/rvv/vfslide1up.ll | 30 +
 llvm/test/CodeGen/RISCV/rvv/vfsub.ll | 60 +
 llvm/test/CodeGen/RISCV/rvv/vfwadd.ll | 36 +
 llvm/test/CodeGen/RISCV/rvv/vfwadd.w.ll | 44 +
 llvm/test/CodeGen/RISCV/rvv/vfwmul.ll | 36 +
 llvm/test/CodeGen/RISCV/rvv/vfwsub.ll | 36 +
 llvm/test/CodeGen/RISCV/rvv/vfwsub.w.ll | 44 +
 llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll | 88 +
 llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll | 88 +
 llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll | 88 +
 llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll | 88 +
 llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll | 88 +
 llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll | 88 +
 llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll | 88 +
 llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll | 88 +
 llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll | 88 +
 llvm/test/CodeGen/RISCV/rvv/vmul-rv64.ll | 88 +
 llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll | 88 +
 llvm/test/CodeGen/RISCV/rvv/vmulh-rv64.ll | 88 +
 llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll | 88 +
 llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv64.ll | 88 +
 llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll | 88 +
 llvm/test/CodeGen/RISCV/rvv/vmulhu-rv64.ll | 88 +
 llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll | 75 +
 llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll | 75 +
 llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll | 75 +
 llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll | 75 +
 llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll | 75 +
 llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll | 75 +
 llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll | 75 +
 llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll | 75 +
 llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll | 110 +
 llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll | 110 +
 llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll | 88 +
 llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll | 88 +
 llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll | 88 +
 llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll | 88 +
 llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll | 165 ++
 llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll | 185 ++
 llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll | 60 +
 llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll | 60 +
 llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll | 66 +
 llvm/test/CodeGen/RISCV/rvv/vrsub-rv64.ll | 66 +
 llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll | 110 +
 llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll | 110 +
 llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll | 110 +
 llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll | 110 +
 .../CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll | 74 +-
 .../CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir | 16 +-
 llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll | 4 +
 llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir | 10 +-
 llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll | 44 +
 llvm/test/CodeGen/RISCV/rvv/vslide1down-rv64.ll | 44 +
 llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll | 44 +
 llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll | 44 +
 llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll | 111 ++
 llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll | 111 ++
 llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll | 88 +
 llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll | 88 +
 llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll | 110 +
 llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll | 110 +
 llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll | 110 +
 llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll | 110 +
 llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll | 98 +
 llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll | 110 +
 llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll | 98 +
 llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll | 110 +
 llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll | 88 +
 llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll | 88 +
 llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll | 88 +
 llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll | 88 +
 llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll | 110 +
 llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll | 110 +
 llvm/test/CodeGen/RISCV/rvv/vwadd-rv32.ll | 60 +
 llvm/test/CodeGen/RISCV/rvv/vwadd-rv64.ll | 60 +
 llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll | 74 +
 llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll | 74 +
 llvm/test/CodeGen/RISCV/rvv/vwaddu-rv32.ll | 60 +
 llvm/test/CodeGen/RISCV/rvv/vwaddu-rv64.ll | 60 +
 llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll | 74 +
 llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll | 74 +
 llvm/test/CodeGen/RISCV/rvv/vwmul-rv32.ll | 60 +
 llvm/test/CodeGen/RISCV/rvv/vwmul-rv64.ll | 60 +
 llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll | 60 +
 llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll | 60 +
 llvm/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll | 60 +
 llvm/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll | 60 +
 llvm/test/CodeGen/RISCV/rvv/vwsub-rv32.ll | 60 +
 llvm/test/CodeGen/RISCV/rvv/vwsub-rv64.ll | 60 +
 llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll | 74 +
 llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll | 74 +
 llvm/test/CodeGen/RISCV/rvv/vwsubu-rv32.ll | 60 +
 llvm/test/CodeGen/RISCV/rvv/vwsubu-rv64.ll | 60 +
 llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll | 74 +
 llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll | 74 +
 llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll | 110 +
 llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll | 110 +
 242 files changed, 18025 insertions(+), 5985 deletions(-)
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/masked-vslide1down-rv32.ll

diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td index d02a4ff..c1d21d6 100644 --- a/clang/include/clang/Basic/riscv_vector.td +++ b/clang/include/clang/Basic/riscv_vector.td @@ -173,20 +173,13 @@ class RVVBuiltin { cast(ResultType)->getElementType(), Ops[1]->getType()}; Ops.insert(Ops.begin() + 1, llvm::Constant::getNullValue(IntrinsicTypes[1])); + // insert undef passthru + Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType)); break; } }], @@ -1321,6 +1316,8 @@ multiclass RVVPseudoVNotBuiltin { Ops[1]->getType()};
Ops.insert(Ops.begin() + 1, llvm::Constant::getAllOnesValue(IntrinsicTypes[1])); + // insert undef passthru + Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType)); break; } }], @@ -1369,6 +1366,8 @@ multiclass RVVPseudoVFUnaryBuiltin { IntrinsicTypes = {ResultType, Ops[0]->getType(), Ops[1]->getType()}; Ops.insert(Ops.begin() + 1, Ops[0]); + // insert undef passthru + Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType)); break; } }], @@ -1402,6 +1401,8 @@ multiclass RVVPseudoVWCVTBuiltin(Ops[0]->getType())->getElementType(), Ops[1]->getType()}; Ops.insert(Ops.begin() + 1, llvm::Constant::getNullValue(IntrinsicTypes[2])); + // insert undef passthru + Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType)); break; } }], @@ -1438,6 +1439,8 @@ multiclass RVVPseudoVNCVTBuiltingetType(), Ops[1]->getType()}; Ops.insert(Ops.begin() + 1, llvm::Constant::getNullValue(IntrinsicTypes[2])); + // insert undef passthru + Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType)); break; } }], @@ -1583,15 +1586,18 @@ defm : RVVIndexedSegStore<"vsoxseg">; // 12. Vector Integer Arithmetic Instructions // 12.1. Vector Single-Width Integer Add and Subtract +let HasNoMaskPassThru = true in { defm vadd : RVVIntBinBuiltinSet; defm vsub : RVVIntBinBuiltinSet; defm vrsub : RVVOutOp1BuiltinSet<"vrsub", "csil", [["vx", "v", "vve"], ["vx", "Uv", "UvUvUe"]]>; +} defm vneg_v : RVVPseudoUnaryBuiltin<"vrsub", "csil">; // 12.2. Vector Widening Integer Add/Subtract // Widening unsigned integer add/subtract, 2*SEW = SEW +/- SEW +let HasNoMaskPassThru = true in { defm vwaddu : RVVUnsignedWidenBinBuiltinSet; defm vwsubu : RVVUnsignedWidenBinBuiltinSet; // Widening signed integer add/subtract, 2*SEW = SEW +/- SEW @@ -1603,6 +1609,7 @@ defm vwsubu : RVVUnsignedWidenOp0BinBuiltinSet; // Widening signed integer add/subtract, 2*SEW = 2*SEW +/- SEW defm vwadd : RVVSignedWidenOp0BinBuiltinSet; defm vwsub : RVVSignedWidenOp0BinBuiltinSet; +} defm vwcvtu_x_x_v : RVVPseudoVWCVTBuiltin<"vwaddu", "vwcvtu_x", "csi", [["Uw", "UwUv"]]>; defm vwcvt_x_x_v : RVVPseudoVWCVTBuiltin<"vwadd", "vwcvt_x", "csi", @@ -1633,12 +1640,15 @@ let HasMask = false, HasPolicy = false in { } // 12.5. Vector Bitwise Logical Instructions +let HasNoMaskPassThru = true in { defm vand : RVVIntBinBuiltinSet; defm vxor : RVVIntBinBuiltinSet; defm vor : RVVIntBinBuiltinSet; +} defm vnot_v : RVVPseudoVNotBuiltin<"vxor", "csil">; // 12.6. Vector Single-Width Bit Shift Instructions +let HasNoMaskPassThru = true in { defm vsll : RVVShiftBuiltinSet; defm vsrl : RVVUnsignedShiftBuiltinSet; defm vsra : RVVSignedShiftBuiltinSet; @@ -1646,6 +1656,7 @@ defm vsra : RVVSignedShiftBuiltinSet; // 12.7. Vector Narrowing Integer Right Shift Instructions defm vnsrl : RVVUnsignedNShiftBuiltinSet; defm vnsra : RVVSignedNShiftBuiltinSet; +} defm vncvt_x_x_w : RVVPseudoVNCVTBuiltin<"vnsrl", "vncvt_x", "csi", [["v", "vw"], ["Uv", "UvUw"]]>; @@ -1665,6 +1676,7 @@ defm vmsge : RVVSignedMaskOutBuiltinSet; } // 12.9. Vector Integer Min/Max Instructions +let HasNoMaskPassThru = true in { defm vminu : RVVUnsignedBinBuiltinSet; defm vmin : RVVSignedBinBuiltinSet; defm vmaxu : RVVUnsignedBinBuiltinSet; @@ -1685,9 +1697,10 @@ defm vdivu : RVVUnsignedBinBuiltinSet; defm vdiv : RVVSignedBinBuiltinSet; defm vremu : RVVUnsignedBinBuiltinSet; defm vrem : RVVSignedBinBuiltinSet; +} // 12.12. 
Vector Widening Integer Multiply Instructions -let Log2LMUL = [-3, -2, -1, 0, 1, 2] in { +let Log2LMUL = [-3, -2, -1, 0, 1, 2], HasNoMaskPassThru = true in { defm vwmul : RVVOutOp0Op1BuiltinSet<"vwmul", "csi", [["vv", "w", "wvv"], ["vx", "w", "wve"]]>; @@ -1751,6 +1764,7 @@ let HasMask = false, HasPolicy = false in { // 13. Vector Fixed-Point Arithmetic Instructions // 13.1. Vector Single-Width Saturating Add and Subtract +let HasNoMaskPassThru = true in { defm vsaddu : RVVUnsignedBinBuiltinSet; defm vsadd : RVVSignedBinBuiltinSet; defm vssubu : RVVUnsignedBinBuiltinSet; @@ -1800,6 +1814,7 @@ let Log2LMUL = [-2, -1, 0, 1, 2] in { [["vv", "w", "wvv"], ["vf", "w", "wve"]]>; } +} // 14.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions defm vfmacc : RVVFloatingTerBuiltinSet; @@ -1827,6 +1842,7 @@ def vfrsqrt7 : RVVFloatingUnaryVVBuiltin; def vfrec7 : RVVFloatingUnaryVVBuiltin; // 14.11. Vector Floating-Point MIN/MAX Instructions +let HasNoMaskPassThru = true in { defm vfmin : RVVFloatingBinBuiltinSet; defm vfmax : RVVFloatingBinBuiltinSet; @@ -1834,6 +1850,7 @@ defm vfmax : RVVFloatingBinBuiltinSet; defm vfsgnj : RVVFloatingBinBuiltinSet; defm vfsgnjn : RVVFloatingBinBuiltinSet; defm vfsgnjx : RVVFloatingBinBuiltinSet; +} defm vfneg_v : RVVPseudoVFUnaryBuiltin<"vfsgnjn", "xfd">; defm vfabs_v : RVVPseudoVFUnaryBuiltin<"vfsgnjx", "xfd">; @@ -2005,6 +2022,7 @@ defm vslideup : RVVSlideBuiltinSet; defm vslidedown : RVVSlideBuiltinSet; // 17.3.3. Vector Slide1up Instructions +let HasNoMaskPassThru = true in { defm vslide1up : RVVSlideOneBuiltinSet; defm vfslide1up : RVVFloatingBinVFBuiltinSet; @@ -2027,6 +2045,7 @@ defm vrgather : RVVOutBuiltinSet<"vrgather_vx", "csil", [["vx", "Uv", "UvUvz"]]>; defm vrgatherei16 : RVVOutBuiltinSet<"vrgatherei16_vv", "csil", [["vv", "Uv", "UvUv(Log2EEW:4)Uv"]]>; +} // 17.5. 
Vector Compress Instruction let HasMask = false, HasPolicy = false, diff --git a/clang/test/CodeGen/RISCV/riscv-attr-builtin-alias.c b/clang/test/CodeGen/RISCV/riscv-attr-builtin-alias.c index dab41f1..e8445d4 100644 --- a/clang/test/CodeGen/RISCV/riscv-attr-builtin-alias.c +++ b/clang/test/CodeGen/RISCV/riscv-attr-builtin-alias.c @@ -25,7 +25,7 @@ vint8m1_t vadd_generic (vint8m1_t op0, vint8m1_t op1, size_t op2); // CHECK-NEXT: [[TMP0:%.*]] = load , * [[OP0_ADDR]], align 1 // CHECK-NEXT: [[TMP1:%.*]] = load , * [[OP1_ADDR]], align 1 // CHECK-NEXT: [[TMP2:%.*]] = load i64, i64* [[VL_ADDR]], align 8 -// CHECK-NEXT: [[TMP3:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( [[TMP0]], [[TMP1]], i64 [[TMP2]]) +// CHECK-NEXT: [[TMP3:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( undef, [[TMP0]], [[TMP1]], i64 [[TMP2]]) // CHECK-NEXT: store [[TMP3]], * [[RET]], align 1 // CHECK-NEXT: [[TMP4:%.*]] = load , * [[RET]], align 1 // CHECK-NEXT: ret [[TMP4]] diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c index 9b6b966..8e7c4a7 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vaadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vaadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vaadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ vint8mf8_t test_vaadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vaadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ vint8mf4_t test_vaadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vaadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ vint8mf4_t test_vaadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vaadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ vint8mf2_t test_vaadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vaadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vaadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vaadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vaadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vaadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vaadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vaadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vaadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vaadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vaadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vaadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vaadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i8.i8.i64( [[OP1:%.*]], i8 
[[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vaadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vaadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vaadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint8m8_t test_vaadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vaadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint8m8_t test_vaadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vaadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint16mf4_t test_vaadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vaadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4_t test_vaadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vaadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint16mf2_t test_vaadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vaadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint16mf2_t test_vaadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: 
@test_vaadd_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vaadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint16m1_t test_vaadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vaadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint16m1_t test_vaadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vaadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint16m2_t test_vaadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vaadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint16m2_t test_vaadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vaadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint16m4_t test_vaadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vaadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint16m4_t test_vaadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vaadd_vv_i16m8(vint16m8_t op1, 
vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint16m8_t test_vaadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vaadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint16m8_t test_vaadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vaadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vaadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vaadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vaadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vaadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vaadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vaadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vaadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vaadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ vint32m2_t test_vaadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vaadd.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vaadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ vint32m2_t test_vaadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vaadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ vint32m4_t test_vaadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vaadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ vint32m4_t test_vaadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vaadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ vint32m8_t test_vaadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vaadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ vint32m8_t test_vaadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vaadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ vint64m1_t test_vaadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vaadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ vint64m1_t test_vaadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vaadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ vint64m2_t test_vaadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vaadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ vint64m2_t test_vaadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vaadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ vint64m4_t test_vaadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vaadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ vint64m4_t test_vaadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vaadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ vint64m8_t test_vaadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vaadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ vint64m8_t test_vaadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vaaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ vuint8mf8_t 
test_vaaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vaaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ vuint8mf8_t test_vaaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vaaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ vuint8mf4_t test_vaaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vaaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ vuint8mf4_t test_vaaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vaaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ vuint8mf2_t test_vaaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vaaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ vuint8mf2_t test_vaaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vaaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ vuint8m1_t test_vaaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vaaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ vuint8m1_t test_vaaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vaaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ vuint8m2_t test_vaaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vaaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ vuint8m2_t test_vaaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vaaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ vuint8m4_t test_vaaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vaaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ vuint8m4_t test_vaaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vaaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ vuint8m8_t test_vaaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vaaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ vuint8m8_t test_vaaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vaaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, @@ -538,7 +538,7 @@ vuint16mf4_t test_vaaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vaaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -547,7 +547,7 @@ vuint16mf4_t test_vaaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vaaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, @@ -557,7 +557,7 @@ vuint16mf2_t test_vaaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vaaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -566,7 +566,7 @@ vuint16mf2_t test_vaaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vaaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -575,7 +575,7 @@ vuint16m1_t test_vaaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vaaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -584,7 +584,7 @@ vuint16m1_t test_vaaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vaaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -593,7 +593,7 @@ vuint16m2_t test_vaaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t 
vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vaaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -602,7 +602,7 @@ vuint16m2_t test_vaaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vaaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -611,7 +611,7 @@ vuint16m4_t test_vaaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vaaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -620,7 +620,7 @@ vuint16m4_t test_vaaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vaaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -629,7 +629,7 @@ vuint16m8_t test_vaaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vaaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -638,7 +638,7 @@ vuint16m8_t test_vaaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vaaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, @@ -648,7 +648,7 @@ vuint32mf2_t test_vaaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, // CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vaaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -657,7 +657,7 @@ vuint32mf2_t test_vaaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vaaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -666,7 +666,7 @@ vuint32m1_t test_vaaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vaaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -675,7 +675,7 @@ vuint32m1_t test_vaaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vaaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -684,7 +684,7 @@ vuint32m2_t test_vaaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vaaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -693,7 +693,7 @@ vuint32m2_t test_vaaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vaaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -702,7 +702,7 @@ vuint32m4_t test_vaaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vaaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -711,7 +711,7 @@ vuint32m4_t test_vaaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vaaddu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vaaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -720,7 +720,7 @@ vuint32m8_t test_vaaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vaaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -729,7 +729,7 @@ vuint32m8_t test_vaaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vaaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -738,7 +738,7 @@ vuint64m1_t test_vaaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vaaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -747,7 +747,7 @@ vuint64m1_t test_vaaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vaaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -756,7 +756,7 @@ vuint64m2_t test_vaaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vaaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -765,7 +765,7 @@ vuint64m2_t test_vaaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vaaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) 
{ @@ -774,7 +774,7 @@ vuint64m4_t test_vaaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vaaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -783,7 +783,7 @@ vuint64m4_t test_vaaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vaaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -792,7 +792,7 @@ vuint64m8_t test_vaaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vaaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c index e3ff19a..bc05aa0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -16,7 +16,7 @@ vint8mf8_t test_vadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -25,7 +25,7 @@ vint8mf8_t test_vadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -34,7 +34,7 @@ vint8mf4_t test_vadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -43,7 +43,7 @@ vint8mf4_t test_vadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -52,7 +52,7 @@ vint8mf2_t test_vadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -61,7 +61,7 @@ vint8mf2_t test_vadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -70,7 +70,7 @@ vint8m1_t test_vadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -79,7 +79,7 @@ vint8m1_t test_vadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -88,7 +88,7 @@ vint8m2_t test_vadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -97,7 +97,7 @@ vint8m2_t test_vadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i8m4( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -106,7 +106,7 @@ vint8m4_t test_vadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -115,7 +115,7 @@ vint8m4_t test_vadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -124,7 +124,7 @@ vint8m8_t test_vadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -133,7 +133,7 @@ vint8m8_t test_vadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -142,7 +142,7 @@ vint16mf4_t test_vadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -151,7 +151,7 @@ vint16mf4_t test_vadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -160,7 +160,7 @@ vint16mf2_t test_vadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t 
vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -169,7 +169,7 @@ vint16mf2_t test_vadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -178,7 +178,7 @@ vint16m1_t test_vadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -187,7 +187,7 @@ vint16m1_t test_vadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -196,7 +196,7 @@ vint16m2_t test_vadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -205,7 +205,7 @@ vint16m2_t test_vadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -214,7 +214,7 @@ vint16m4_t test_vadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vadd_vx_i16m4(vint16m4_t op1, int16_t 
op2, size_t vl) { @@ -223,7 +223,7 @@ vint16m4_t test_vadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -232,7 +232,7 @@ vint16m8_t test_vadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -241,7 +241,7 @@ vint16m8_t test_vadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -250,7 +250,7 @@ vint32mf2_t test_vadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -259,7 +259,7 @@ vint32mf2_t test_vadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -268,7 +268,7 @@ vint32m1_t test_vadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -277,7 +277,7 @@ vint32m1_t test_vadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -286,7 +286,7 @@ vint32m2_t test_vadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -295,7 +295,7 @@ vint32m2_t test_vadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -304,7 +304,7 @@ vint32m4_t test_vadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -313,7 +313,7 @@ vint32m4_t test_vadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -322,7 +322,7 @@ vint32m8_t test_vadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -331,7 +331,7 @@ vint32m8_t test_vadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -340,7 +340,7 @@ vint64m1_t test_vadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -349,7 +349,7 @@ vint64m1_t test_vadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -358,7 +358,7 @@ vint64m2_t test_vadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -367,7 +367,7 @@ vint64m2_t test_vadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -376,7 +376,7 @@ vint64m4_t test_vadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -385,7 +385,7 @@ vint64m4_t test_vadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -394,7 +394,7 @@ vint64m8_t test_vadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -403,7 +403,7 @@ vint64m8_t test_vadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vadd_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -412,7 +412,7 @@ vuint8mf8_t test_vadd_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vadd_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -421,7 +421,7 @@ vuint8mf8_t test_vadd_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vadd_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -430,7 +430,7 @@ vuint8mf4_t test_vadd_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vadd_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -439,7 +439,7 @@ vuint8mf4_t test_vadd_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vadd_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -448,7 +448,7 @@ vuint8mf2_t test_vadd_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vadd_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -457,7 +457,7 @@ vuint8mf2_t test_vadd_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vadd_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -466,7 +466,7 @@ vuint8m1_t test_vadd_vv_u8m1(vuint8m1_t op1, vuint8m1_t 
op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vadd_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -475,7 +475,7 @@ vuint8m1_t test_vadd_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vadd_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -484,7 +484,7 @@ vuint8m2_t test_vadd_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vadd_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -493,7 +493,7 @@ vuint8m2_t test_vadd_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vadd_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -502,7 +502,7 @@ vuint8m4_t test_vadd_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vadd_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -511,7 +511,7 @@ vuint8m4_t test_vadd_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vadd_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -520,7 +520,7 @@ vuint8m8_t test_vadd_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vadd_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -529,7 +529,7 @@ 
vuint8m8_t test_vadd_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vadd_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -538,7 +538,7 @@ vuint16mf4_t test_vadd_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vadd_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vadd_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -547,7 +547,7 @@ vuint16mf4_t test_vadd_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vadd_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -556,7 +556,7 @@ vuint16mf2_t test_vadd_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vadd_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vadd_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -565,7 +565,7 @@ vuint16mf2_t test_vadd_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vadd_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -574,7 +574,7 @@ vuint16m1_t test_vadd_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vadd_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -583,7 +583,7 @@ vuint16m1_t test_vadd_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vadd_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -592,7 +592,7 @@ vuint16m2_t test_vadd_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vadd_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -601,7 +601,7 @@ vuint16m2_t test_vadd_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vadd_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -610,7 +610,7 @@ vuint16m4_t test_vadd_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vadd_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -619,7 +619,7 @@ vuint16m4_t test_vadd_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vadd_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -628,7 +628,7 @@ vuint16m8_t test_vadd_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vadd_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -637,7 +637,7 @@ vuint16m8_t test_vadd_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vadd_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -646,7 +646,7 @@ vuint32mf2_t test_vadd_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vadd_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vadd.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vadd_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -655,7 +655,7 @@ vuint32mf2_t test_vadd_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vadd_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -664,7 +664,7 @@ vuint32m1_t test_vadd_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vadd_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -673,7 +673,7 @@ vuint32m1_t test_vadd_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vadd_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -682,7 +682,7 @@ vuint32m2_t test_vadd_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vadd_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -691,7 +691,7 @@ vuint32m2_t test_vadd_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vadd_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -700,7 +700,7 @@ vuint32m4_t test_vadd_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vadd_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -709,7 +709,7 @@ vuint32m4_t test_vadd_vx_u32m4(vuint32m4_t op1, 
uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vadd_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -718,7 +718,7 @@ vuint32m8_t test_vadd_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vadd_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -727,7 +727,7 @@ vuint32m8_t test_vadd_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vadd_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -736,7 +736,7 @@ vuint64m1_t test_vadd_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vadd_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -745,7 +745,7 @@ vuint64m1_t test_vadd_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vadd_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -754,7 +754,7 @@ vuint64m2_t test_vadd_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vadd_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -763,7 +763,7 @@ vuint64m2_t test_vadd_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint64m4_t test_vadd_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -772,7 +772,7 @@ vuint64m4_t test_vadd_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vadd_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -781,7 +781,7 @@ vuint64m4_t test_vadd_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vadd_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -790,7 +790,7 @@ vuint64m8_t test_vadd_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vadd_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c index 909da39..9bddb61f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vand_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vand_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vand_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ vint8mf8_t test_vand_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vand_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ vint8mf4_t test_vand_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i8mf4( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vand_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ vint8mf4_t test_vand_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vand_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ vint8mf2_t test_vand_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vand_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vand_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vand_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vand_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vand_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vand_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vand_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vand_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vand_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vand_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: 
@test_vand_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vand_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vand_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vand_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vand_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vand_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint8m8_t test_vand_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vand_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint8m8_t test_vand_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vand_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint16mf4_t test_vand_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vand_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4_t test_vand_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vand_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint16mf2_t 
test_vand_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vand_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint16mf2_t test_vand_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vand_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint16m1_t test_vand_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vand_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint16m1_t test_vand_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vand_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint16m2_t test_vand_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vand_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint16m2_t test_vand_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vand_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint16m4_t test_vand_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint16m4_t test_vand_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint16m4_t test_vand_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vand_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint16m8_t test_vand_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vand_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint16m8_t test_vand_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vand_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vand_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vand_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vand_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vand_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vand_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vand_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vand_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vand.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vand_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ vint32m2_t test_vand_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vand_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ vint32m2_t test_vand_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vand_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ vint32m4_t test_vand_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vand_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ vint32m4_t test_vand_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vand_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ vint32m8_t test_vand_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vand_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ vint32m8_t test_vand_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vand_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ vint64m1_t test_vand_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vand.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vand_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ vint64m1_t test_vand_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vand_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ vint64m2_t test_vand_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vand_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ vint64m2_t test_vand_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vand_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ vint64m4_t test_vand_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vand_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ vint64m4_t test_vand_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vand_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ vint64m8_t test_vand_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vand_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ vint64m8_t test_vand_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: 
@test_vand_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vand_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ vuint8mf8_t test_vand_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vand_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ vuint8mf8_t test_vand_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vand_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ vuint8mf4_t test_vand_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vand_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ vuint8mf4_t test_vand_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vand_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ vuint8mf2_t test_vand_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vand_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ vuint8mf2_t test_vand_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vand_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ 
vuint8m1_t test_vand_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vand_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ vuint8m1_t test_vand_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vand_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ vuint8m2_t test_vand_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vand_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ vuint8m2_t test_vand_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vand_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ vuint8m4_t test_vand_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vand_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ vuint8m4_t test_vand_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vand_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ vuint8m8_t test_vand_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t 
test_vand_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ vuint8m8_t test_vand_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vand_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -537,7 +537,7 @@ vuint16mf4_t test_vand_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vand_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vand_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -546,7 +546,7 @@ vuint16mf4_t test_vand_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vand_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -555,7 +555,7 @@ vuint16mf2_t test_vand_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vand_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vand_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -564,7 +564,7 @@ vuint16mf2_t test_vand_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vand_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -573,7 +573,7 @@ vuint16m1_t test_vand_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vand_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -582,7 +582,7 @@ vuint16m1_t test_vand_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vand_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -591,7 +591,7 @@ vuint16m2_t test_vand_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vand_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -600,7 +600,7 @@ vuint16m2_t test_vand_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vand_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -609,7 +609,7 @@ vuint16m4_t test_vand_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vand_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -618,7 +618,7 @@ vuint16m4_t test_vand_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vand_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -627,7 +627,7 @@ vuint16m8_t test_vand_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vand_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -636,7 +636,7 @@ vuint16m8_t test_vand_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vand_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -645,7 +645,7 @@ vuint32mf2_t test_vand_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vand_vx_u32mf2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vand_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -654,7 +654,7 @@ vuint32mf2_t test_vand_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vand_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -663,7 +663,7 @@ vuint32m1_t test_vand_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vand_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -672,7 +672,7 @@ vuint32m1_t test_vand_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vand_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -681,7 +681,7 @@ vuint32m2_t test_vand_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vand_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2_t test_vand_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vand_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -699,7 +699,7 @@ vuint32m4_t test_vand_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vand_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ 
-708,7 +708,7 @@ vuint32m4_t test_vand_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vand_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -717,7 +717,7 @@ vuint32m8_t test_vand_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vand_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -726,7 +726,7 @@ vuint32m8_t test_vand_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vand_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -735,7 +735,7 @@ vuint64m1_t test_vand_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vand_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -744,7 +744,7 @@ vuint64m1_t test_vand_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vand_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -753,7 +753,7 @@ vuint64m2_t test_vand_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vand_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -762,7 +762,7 @@ vuint64m2_t test_vand_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vand_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -771,7 +771,7 @@ vuint64m4_t test_vand_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vand_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -780,7 +780,7 @@ vuint64m4_t test_vand_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vand_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -789,7 +789,7 @@ vuint64m8_t test_vand_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vand_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c index 8def459..98570c8 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vasub_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vasub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vasub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vasub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ vint8mf8_t test_vasub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vasub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ vint8mf4_t 
test_vasub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vasub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ vint8mf4_t test_vasub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vasub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ vint8mf2_t test_vasub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vasub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vasub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vasub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vasub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vasub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vasub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vasub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vasub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vasub_vx_i8m2(vint8m2_t op1, int8_t 
op2, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vasub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vasub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vasub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vasub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vasub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vasub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint8m8_t test_vasub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vasub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint8m8_t test_vasub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vasub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint16mf4_t test_vasub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vasub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4_t test_vasub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) 
// CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vasub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint16mf2_t test_vasub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vasub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint16mf2_t test_vasub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vasub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint16m1_t test_vasub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vasub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint16m1_t test_vasub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vasub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint16m2_t test_vasub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vasub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint16m2_t test_vasub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vasub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint16m4_t test_vasub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i16.i16.i64( [[OP1:%.*]], i16 
[[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vasub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint16m4_t test_vasub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vasub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint16m8_t test_vasub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vasub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint16m8_t test_vasub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vasub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vasub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vasub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vasub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vasub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vasub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vasub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vasub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // 
CHECK-RV64-LABEL: @test_vasub_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vasub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ vint32m2_t test_vasub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vasub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ vint32m2_t test_vasub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vasub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ vint32m4_t test_vasub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vasub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ vint32m4_t test_vasub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vasub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ vint32m8_t test_vasub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vasub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ vint32m8_t test_vasub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t 
test_vasub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ vint64m1_t test_vasub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vasub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ vint64m1_t test_vasub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vasub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ vint64m2_t test_vasub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vasub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ vint64m2_t test_vasub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vasub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ vint64m4_t test_vasub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vasub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ vint64m4_t test_vasub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vasub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ vint64m8_t test_vasub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vasub.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vasub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ vint64m8_t test_vasub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vasubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ vuint8mf8_t test_vasubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vasubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ vuint8mf8_t test_vasubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vasubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ vuint8mf4_t test_vasubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vasubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ vuint8mf4_t test_vasubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vasubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ vuint8mf2_t test_vasubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vasubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ vuint8mf2_t test_vasubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vasubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ vuint8m1_t test_vasubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vasubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ vuint8m1_t test_vasubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vasubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ vuint8m2_t test_vasubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vasubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ vuint8m2_t test_vasubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vasubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ vuint8m4_t test_vasubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vasubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ vuint8m4_t test_vasubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vasubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ vuint8m8_t 
test_vasubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vasubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ vuint8m8_t test_vasubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vasubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, @@ -538,7 +538,7 @@ vuint16mf4_t test_vasubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vasubu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vasubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -547,7 +547,7 @@ vuint16mf4_t test_vasubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vasubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, @@ -557,7 +557,7 @@ vuint16mf2_t test_vasubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vasubu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vasubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -566,7 +566,7 @@ vuint16mf2_t test_vasubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vasubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -575,7 +575,7 @@ vuint16m1_t test_vasubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vasubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -584,7 +584,7 @@ vuint16m1_t test_vasubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vasubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -593,7 +593,7 @@ vuint16m2_t test_vasubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vasubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -602,7 +602,7 @@ vuint16m2_t test_vasubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vasubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -611,7 +611,7 @@ vuint16m4_t test_vasubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vasubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -620,7 +620,7 @@ vuint16m4_t test_vasubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vasubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -629,7 +629,7 @@ vuint16m8_t test_vasubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vasubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -638,7 +638,7 @@ vuint16m8_t test_vasubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vasubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, @@ -648,7 +648,7 @@ vuint32mf2_t test_vasubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, // CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vasubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -657,7 +657,7 @@ vuint32mf2_t test_vasubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vasubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -666,7 +666,7 @@ vuint32m1_t test_vasubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vasubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -675,7 +675,7 @@ vuint32m1_t test_vasubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vasubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -684,7 +684,7 @@ vuint32m2_t test_vasubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vasubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -693,7 +693,7 @@ vuint32m2_t test_vasubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vasubu_vv_u32m4(vuint32m4_t op1, 
vuint32m4_t op2, size_t vl) { @@ -702,7 +702,7 @@ vuint32m4_t test_vasubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vasubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -711,7 +711,7 @@ vuint32m4_t test_vasubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vasubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -720,7 +720,7 @@ vuint32m8_t test_vasubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vasubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -729,7 +729,7 @@ vuint32m8_t test_vasubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vasubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -738,7 +738,7 @@ vuint64m1_t test_vasubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vasubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -747,7 +747,7 @@ vuint64m1_t test_vasubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vasubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -756,7 +756,7 @@ vuint64m2_t test_vasubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vasubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -765,7 +765,7 @@ vuint64m2_t test_vasubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vasubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -774,7 +774,7 @@ vuint64m4_t test_vasubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vasubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -783,7 +783,7 @@ vuint64m4_t test_vasubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vasubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -792,7 +792,7 @@ vuint64m8_t test_vasubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vasubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c index e5f8d3f..e0e804d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vdiv_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vdiv_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint8mf8_t test_vdiv_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ vint8mf8_t test_vdiv_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vdiv_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ vint8mf4_t test_vdiv_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vdiv_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ vint8mf4_t test_vdiv_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vdiv_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ vint8mf2_t test_vdiv_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vdiv_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vdiv_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vdiv_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vdiv_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vdiv_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vdiv_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vdiv_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vdiv_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vdiv_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vdiv_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vdiv_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vdiv_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vdiv_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vdiv_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vdiv_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint8m8_t test_vdiv_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vdiv_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint8m8_t test_vdiv_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vdiv_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint16mf4_t test_vdiv_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i16.i16.i64( undef, [[OP1:%.*]], 
i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vdiv_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4_t test_vdiv_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vdiv_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint16mf2_t test_vdiv_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vdiv_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint16mf2_t test_vdiv_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vdiv_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint16m1_t test_vdiv_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vdiv_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint16m1_t test_vdiv_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vdiv_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint16m2_t test_vdiv_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vdiv_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint16m2_t test_vdiv_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i16.nxv16i16.i64( [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vdiv_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint16m4_t test_vdiv_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vdiv_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint16m4_t test_vdiv_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vdiv_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint16m8_t test_vdiv_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vdiv_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint16m8_t test_vdiv_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vdiv_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vdiv_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vdiv_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vdiv_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vdiv_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vdiv_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: 
@test_vdiv_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vdiv_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vdiv_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vdiv_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ vint32m2_t test_vdiv_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vdiv_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ vint32m2_t test_vdiv_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vdiv_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ vint32m4_t test_vdiv_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vdiv_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ vint32m4_t test_vdiv_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vdiv_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ vint32m8_t test_vdiv_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vdiv_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 
@@ vint32m8_t test_vdiv_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vdiv_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ vint64m1_t test_vdiv_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vdiv_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ vint64m1_t test_vdiv_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vdiv_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ vint64m2_t test_vdiv_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vdiv_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ vint64m2_t test_vdiv_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vdiv_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ vint64m4_t test_vdiv_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vdiv_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ vint64m4_t test_vdiv_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint64m8_t test_vdiv_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ vint64m8_t test_vdiv_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vdiv_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ vint64m8_t test_vdiv_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vdivu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ vuint8mf8_t test_vdivu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vdivu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ vuint8mf8_t test_vdivu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vdivu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ vuint8mf4_t test_vdivu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vdivu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ vuint8mf4_t test_vdivu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vdivu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ vuint8mf2_t test_vdivu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vdivu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vdivu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ vuint8mf2_t test_vdivu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vdivu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ vuint8m1_t test_vdivu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vdivu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ vuint8m1_t test_vdivu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vdivu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ vuint8m2_t test_vdivu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vdivu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ vuint8m2_t test_vdivu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vdivu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ vuint8m4_t test_vdivu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vdivu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ vuint8m4_t test_vdivu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vdivu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vdivu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ vuint8m8_t test_vdivu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vdivu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ vuint8m8_t test_vdivu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vdivu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -537,7 +537,7 @@ vuint16mf4_t test_vdivu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vdivu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vdivu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -546,7 +546,7 @@ vuint16mf4_t test_vdivu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vdivu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -555,7 +555,7 @@ vuint16mf2_t test_vdivu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vdivu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vdivu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -564,7 +564,7 @@ vuint16mf2_t test_vdivu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vdivu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -573,7 +573,7 @@ 
vuint16m1_t test_vdivu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vdivu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -582,7 +582,7 @@ vuint16m1_t test_vdivu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vdivu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -591,7 +591,7 @@ vuint16m2_t test_vdivu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vdivu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -600,7 +600,7 @@ vuint16m2_t test_vdivu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vdivu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -609,7 +609,7 @@ vuint16m4_t test_vdivu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vdivu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -618,7 +618,7 @@ vuint16m4_t test_vdivu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vdivu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -627,7 +627,7 @@ vuint16m8_t test_vdivu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i16.i16.i64( 
undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vdivu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -636,7 +636,7 @@ vuint16m8_t test_vdivu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vdivu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -645,7 +645,7 @@ vuint32mf2_t test_vdivu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vdivu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -654,7 +654,7 @@ vuint32mf2_t test_vdivu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vdivu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -663,7 +663,7 @@ vuint32m1_t test_vdivu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vdivu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -672,7 +672,7 @@ vuint32m1_t test_vdivu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vdivu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -681,7 +681,7 @@ vuint32m2_t test_vdivu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vdivu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2_t test_vdivu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vdivu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -699,7 +699,7 @@ vuint32m4_t test_vdivu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vdivu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -708,7 +708,7 @@ vuint32m4_t test_vdivu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vdivu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -717,7 +717,7 @@ vuint32m8_t test_vdivu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vdivu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -726,7 +726,7 @@ vuint32m8_t test_vdivu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vdivu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -735,7 +735,7 @@ vuint64m1_t test_vdivu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vdivu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -744,7 +744,7 @@ vuint64m1_t test_vdivu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vdivu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t 
vl) { @@ -753,7 +753,7 @@ vuint64m2_t test_vdivu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vdivu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -762,7 +762,7 @@ vuint64m2_t test_vdivu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vdivu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -771,7 +771,7 @@ vuint64m4_t test_vdivu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vdivu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -780,7 +780,7 @@ vuint64m4_t test_vdivu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vdivu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -789,7 +789,7 @@ vuint64m8_t test_vdivu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vdivu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfabs.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfabs.c index f784db2..7d098fa 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfabs.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfabs.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vfabs_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfabs_v_f32mf2 (vfloat32mf2_t op1, size_t vl) { @@ -16,7 +16,7 @@ vfloat32mf2_t test_vfabs_v_f32mf2 (vfloat32mf2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vfabs_v_f32m1( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfabs_v_f32m1 (vfloat32m1_t op1, size_t vl) { @@ -25,7 +25,7 @@ vfloat32m1_t test_vfabs_v_f32m1 (vfloat32m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vfabs_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfabs_v_f32m2 (vfloat32m2_t op1, size_t vl) { @@ -34,7 +34,7 @@ vfloat32m2_t test_vfabs_v_f32m2 (vfloat32m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vfabs_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfabs_v_f32m4 (vfloat32m4_t op1, size_t vl) { @@ -43,7 +43,7 @@ vfloat32m4_t test_vfabs_v_f32m4 (vfloat32m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vfabs_v_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfabs_v_f32m8 (vfloat32m8_t op1, size_t vl) { @@ -52,7 +52,7 @@ vfloat32m8_t test_vfabs_v_f32m8 (vfloat32m8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vfabs_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfabs_v_f64m1 (vfloat64m1_t op1, size_t vl) { @@ -61,7 +61,7 @@ vfloat64m1_t test_vfabs_v_f64m1 (vfloat64m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vfabs_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfabs_v_f64m2 (vfloat64m2_t op1, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m2_t test_vfabs_v_f64m2 (vfloat64m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vfabs_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfabs_v_f64m4 (vfloat64m4_t op1, size_t vl) { @@ -79,7 +79,7 @@ vfloat64m4_t test_vfabs_v_f64m4 (vfloat64m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vfabs_v_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfabs_v_f64m8 (vfloat64m8_t op1, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c index 0d7c709..455c0dc 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vv_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { @@ -17,7 +17,7 @@ vfloat16mf4_t test_vfadd_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t // CHECK-RV64-LABEL: @test_vfadd_vf_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -26,7 +26,7 @@ vfloat16mf4_t test_vfadd_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) // CHECK-RV64-LABEL: @test_vfadd_vv_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { @@ -35,7 +35,7 @@ vfloat16mf2_t test_vfadd_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t // CHECK-RV64-LABEL: @test_vfadd_vf_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -44,7 +44,7 @@ vfloat16mf2_t test_vfadd_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) // CHECK-RV64-LABEL: @test_vfadd_vv_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -53,7 +53,7 @@ vfloat16m1_t test_vfadd_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfadd_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfadd.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -62,7 +62,7 @@ vfloat16m1_t test_vfadd_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfadd_vv_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -71,7 +71,7 @@ vfloat16m2_t test_vfadd_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfadd_vf_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -80,7 +80,7 @@ vfloat16m2_t test_vfadd_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfadd_vv_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { @@ -89,7 +89,7 @@ vfloat16m4_t test_vfadd_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfadd_vf_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -98,7 +98,7 @@ vfloat16m4_t test_vfadd_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfadd_vv_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { @@ -107,7 +107,7 @@ vfloat16m8_t test_vfadd_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfadd_vf_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { @@ -116,7 +116,7 @@ vfloat16m8_t test_vfadd_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { // 
CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vv_f32mf2 (vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { @@ -125,7 +125,7 @@ vfloat32mf2_t test_vfadd_vv_f32mf2 (vfloat32mf2_t op1, vfloat32mf2_t op2, size_t // CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vf_f32mf2 (vfloat32mf2_t op1, float op2, size_t vl) { @@ -134,7 +134,7 @@ vfloat32mf2_t test_vfadd_vf_f32mf2 (vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfadd_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vv_f32m1 (vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { @@ -143,7 +143,7 @@ vfloat32m1_t test_vfadd_vv_f32m1 (vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfadd_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vf_f32m1 (vfloat32m1_t op1, float op2, size_t vl) { @@ -152,7 +152,7 @@ vfloat32m1_t test_vfadd_vf_f32m1 (vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfadd_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vv_f32m2 (vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { @@ -161,7 +161,7 @@ vfloat32m2_t test_vfadd_vv_f32m2 (vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfadd_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vf_f32m2 (vfloat32m2_t op1, float op2, size_t vl) { @@ -170,7 +170,7 @@ vfloat32m2_t test_vfadd_vf_f32m2 (vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfadd_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vv_f32m4 (vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { @@ -179,7 +179,7 @@ vfloat32m4_t test_vfadd_vv_f32m4 (vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfadd_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vf_f32m4 (vfloat32m4_t op1, float op2, size_t vl) { @@ -188,7 +188,7 @@ vfloat32m4_t test_vfadd_vf_f32m4 (vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfadd_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vv_f32m8 (vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { @@ -197,7 +197,7 @@ vfloat32m8_t test_vfadd_vv_f32m8 (vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfadd_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vf_f32m8 (vfloat32m8_t op1, float op2, size_t vl) { @@ -206,7 +206,7 @@ vfloat32m8_t test_vfadd_vf_f32m8 (vfloat32m8_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfadd_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vv_f64m1 (vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { @@ -215,7 +215,7 @@ vfloat64m1_t test_vfadd_vv_f64m1 (vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfadd_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vf_f64m1 (vfloat64m1_t op1, double op2, size_t vl) { @@ -224,7 +224,7 @@ vfloat64m1_t test_vfadd_vf_f64m1 (vfloat64m1_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfadd_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vv_f64m2 (vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { @@ -233,7 +233,7 @@ vfloat64m2_t test_vfadd_vv_f64m2 (vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfadd_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfadd.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vf_f64m2 (vfloat64m2_t op1, double op2, size_t vl) { @@ -242,7 +242,7 @@ vfloat64m2_t test_vfadd_vf_f64m2 (vfloat64m2_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfadd_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vv_f64m4 (vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { @@ -251,7 +251,7 @@ vfloat64m4_t test_vfadd_vv_f64m4 (vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfadd_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vf_f64m4 (vfloat64m4_t op1, double op2, size_t vl) { @@ -260,7 +260,7 @@ vfloat64m4_t test_vfadd_vf_f64m4 (vfloat64m4_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfadd_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vv_f64m8 (vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { @@ -269,7 +269,7 @@ vfloat64m8_t test_vfadd_vv_f64m8 (vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfadd_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vf_f64m8 (vfloat64m8_t op1, double op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfdiv.c index f8c882e..05e3acb 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfdiv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfdiv.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfdiv_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -17,7 +17,7 @@ vfloat32mf2_t test_vfdiv_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, // CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfdiv.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -26,7 +26,7 @@ vfloat32mf2_t test_vfdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfdiv_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, @@ -36,7 +36,7 @@ vfloat32m1_t test_vfdiv_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -45,7 +45,7 @@ vfloat32m1_t test_vfdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfdiv_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, @@ -55,7 +55,7 @@ vfloat32m2_t test_vfdiv_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -64,7 +64,7 @@ vfloat32m2_t test_vfdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfdiv_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, @@ -74,7 +74,7 @@ vfloat32m4_t test_vfdiv_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -83,7 +83,7 @@ vfloat32m4_t test_vfdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfdiv.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfdiv_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, @@ -93,7 +93,7 @@ vfloat32m8_t test_vfdiv_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -102,7 +102,7 @@ vfloat32m8_t test_vfdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfdiv_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, @@ -112,7 +112,7 @@ vfloat64m1_t test_vfdiv_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -121,7 +121,7 @@ vfloat64m1_t test_vfdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfdiv_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, @@ -131,7 +131,7 @@ vfloat64m2_t test_vfdiv_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -140,7 +140,7 @@ vfloat64m2_t test_vfdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfdiv_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, @@ -150,7 +150,7 @@ vfloat64m4_t test_vfdiv_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, 
// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -159,7 +159,7 @@ vfloat64m4_t test_vfdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfdiv_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, @@ -169,7 +169,7 @@ vfloat64m8_t test_vfdiv_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfdiv_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmax.c index 1ae5f28..c8f874d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmax.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmax.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmax_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -17,7 +17,7 @@ vfloat32mf2_t test_vfmax_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, // CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmax_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -26,7 +26,7 @@ vfloat32mf2_t test_vfmax_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmax_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmax_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, @@ -36,7 +36,7 @@ vfloat32m1_t test_vfmax_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, // CHECK-RV64-LABEL: @test_vfmax_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmax_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -45,7 +45,7 @@ vfloat32m1_t test_vfmax_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmax_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmax_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, @@ -55,7 +55,7 @@ vfloat32m2_t test_vfmax_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, // CHECK-RV64-LABEL: @test_vfmax_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmax_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -64,7 +64,7 @@ vfloat32m2_t test_vfmax_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmax_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmax_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, @@ -74,7 +74,7 @@ vfloat32m4_t test_vfmax_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, // CHECK-RV64-LABEL: @test_vfmax_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmax_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -83,7 +83,7 @@ vfloat32m4_t test_vfmax_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmax_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmax_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, @@ -93,7 +93,7 @@ vfloat32m8_t test_vfmax_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, // CHECK-RV64-LABEL: @test_vfmax_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmax_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -102,7 +102,7 @@ vfloat32m8_t test_vfmax_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmax_vv_f64m1( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmax_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, @@ -112,7 +112,7 @@ vfloat64m1_t test_vfmax_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, // CHECK-RV64-LABEL: @test_vfmax_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmax_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -121,7 +121,7 @@ vfloat64m1_t test_vfmax_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmax_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmax_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, @@ -131,7 +131,7 @@ vfloat64m2_t test_vfmax_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, // CHECK-RV64-LABEL: @test_vfmax_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmax_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -140,7 +140,7 @@ vfloat64m2_t test_vfmax_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmax_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmax_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, @@ -150,7 +150,7 @@ vfloat64m4_t test_vfmax_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, // CHECK-RV64-LABEL: @test_vfmax_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmax_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -159,7 +159,7 @@ vfloat64m4_t test_vfmax_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmax_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmax_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, @@ -169,7 +169,7 @@ vfloat64m8_t 
test_vfmax_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, // CHECK-RV64-LABEL: @test_vfmax_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmax_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmin.c index 560d030..2328aaf 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmin.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmin.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmin_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -17,7 +17,7 @@ vfloat32mf2_t test_vfmin_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, // CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmin_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -26,7 +26,7 @@ vfloat32mf2_t test_vfmin_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmin_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmin_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, @@ -36,7 +36,7 @@ vfloat32m1_t test_vfmin_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, // CHECK-RV64-LABEL: @test_vfmin_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmin_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -45,7 +45,7 @@ vfloat32m1_t test_vfmin_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmin_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmin_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, @@ -55,7 +55,7 @@ vfloat32m2_t test_vfmin_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, // CHECK-RV64-LABEL: @test_vfmin_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmin.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmin_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -64,7 +64,7 @@ vfloat32m2_t test_vfmin_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmin_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmin_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, @@ -74,7 +74,7 @@ vfloat32m4_t test_vfmin_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, // CHECK-RV64-LABEL: @test_vfmin_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmin_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -83,7 +83,7 @@ vfloat32m4_t test_vfmin_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmin_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmin_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, @@ -93,7 +93,7 @@ vfloat32m8_t test_vfmin_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, // CHECK-RV64-LABEL: @test_vfmin_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmin_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -102,7 +102,7 @@ vfloat32m8_t test_vfmin_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmin_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmin_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, @@ -112,7 +112,7 @@ vfloat64m1_t test_vfmin_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, // CHECK-RV64-LABEL: @test_vfmin_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmin_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -121,7 +121,7 @@ vfloat64m1_t test_vfmin_vf_f64m1(vfloat64m1_t op1, double op2, size_t 
vl) { // CHECK-RV64-LABEL: @test_vfmin_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmin_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, @@ -131,7 +131,7 @@ vfloat64m2_t test_vfmin_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, // CHECK-RV64-LABEL: @test_vfmin_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmin_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -140,7 +140,7 @@ vfloat64m2_t test_vfmin_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmin_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmin_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, @@ -150,7 +150,7 @@ vfloat64m4_t test_vfmin_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, // CHECK-RV64-LABEL: @test_vfmin_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmin_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -159,7 +159,7 @@ vfloat64m4_t test_vfmin_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmin_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmin_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, @@ -169,7 +169,7 @@ vfloat64m8_t test_vfmin_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, // CHECK-RV64-LABEL: @test_vfmin_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmin_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmul.c index 6051fcf..c5f9992 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmul.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmul_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -17,7 +17,7 @@ vfloat32mf2_t test_vfmul_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, // CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmul_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -26,7 +26,7 @@ vfloat32mf2_t test_vfmul_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmul_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmul_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, @@ -36,7 +36,7 @@ vfloat32m1_t test_vfmul_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, // CHECK-RV64-LABEL: @test_vfmul_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmul_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -45,7 +45,7 @@ vfloat32m1_t test_vfmul_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmul_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmul_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, @@ -55,7 +55,7 @@ vfloat32m2_t test_vfmul_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, // CHECK-RV64-LABEL: @test_vfmul_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmul_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -64,7 +64,7 @@ vfloat32m2_t test_vfmul_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmul_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmul_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, @@ -74,7 +74,7 @@ vfloat32m4_t test_vfmul_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, // CHECK-RV64-LABEL: @test_vfmul_vf_f32m4( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmul_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -83,7 +83,7 @@ vfloat32m4_t test_vfmul_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmul_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmul_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, @@ -93,7 +93,7 @@ vfloat32m8_t test_vfmul_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, // CHECK-RV64-LABEL: @test_vfmul_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmul_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -102,7 +102,7 @@ vfloat32m8_t test_vfmul_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmul_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmul_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, @@ -112,7 +112,7 @@ vfloat64m1_t test_vfmul_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, // CHECK-RV64-LABEL: @test_vfmul_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmul_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -121,7 +121,7 @@ vfloat64m1_t test_vfmul_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmul_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmul_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, @@ -131,7 +131,7 @@ vfloat64m2_t test_vfmul_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, // CHECK-RV64-LABEL: @test_vfmul_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmul_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -140,7 +140,7 @@ vfloat64m2_t 
test_vfmul_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmul_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmul_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, @@ -150,7 +150,7 @@ vfloat64m4_t test_vfmul_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, // CHECK-RV64-LABEL: @test_vfmul_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmul_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -159,7 +159,7 @@ vfloat64m4_t test_vfmul_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmul_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmul_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, @@ -169,7 +169,7 @@ vfloat64m8_t test_vfmul_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, // CHECK-RV64-LABEL: @test_vfmul_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmul_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfneg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfneg.c index b2f4fb3..c688b4c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfneg.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfneg.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vfneg_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfneg_v_f32mf2 (vfloat32mf2_t op1, size_t vl) { @@ -16,7 +16,7 @@ vfloat32mf2_t test_vfneg_v_f32mf2 (vfloat32mf2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vfneg_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfneg_v_f32m1 (vfloat32m1_t op1, size_t vl) { @@ -25,7 +25,7 @@ vfloat32m1_t test_vfneg_v_f32m1 (vfloat32m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vfneg_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP1]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfneg_v_f32m2 (vfloat32m2_t op1, size_t vl) { @@ -34,7 +34,7 @@ vfloat32m2_t test_vfneg_v_f32m2 (vfloat32m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vfneg_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfneg_v_f32m4 (vfloat32m4_t op1, size_t vl) { @@ -43,7 +43,7 @@ vfloat32m4_t test_vfneg_v_f32m4 (vfloat32m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vfneg_v_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfneg_v_f32m8 (vfloat32m8_t op1, size_t vl) { @@ -52,7 +52,7 @@ vfloat32m8_t test_vfneg_v_f32m8 (vfloat32m8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vfneg_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfneg_v_f64m1 (vfloat64m1_t op1, size_t vl) { @@ -61,7 +61,7 @@ vfloat64m1_t test_vfneg_v_f64m1 (vfloat64m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vfneg_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfneg_v_f64m2 (vfloat64m2_t op1, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m2_t test_vfneg_v_f64m2 (vfloat64m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vfneg_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfneg_v_f64m4 (vfloat64m4_t op1, size_t vl) { @@ -79,7 +79,7 @@ vfloat64m4_t test_vfneg_v_f64m4 (vfloat64m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vfneg_v_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfneg_v_f64m8 (vfloat64m8_t op1, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrdiv.c index 8c19f7b..ed789ae 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrdiv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrdiv.c @@ -7,7 +7,7 @@ // 
CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfrdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -16,7 +16,7 @@ vfloat32mf2_t test_vfrdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfrdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -25,7 +25,7 @@ vfloat32m1_t test_vfrdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfrdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -34,7 +34,7 @@ vfloat32m2_t test_vfrdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfrdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -43,7 +43,7 @@ vfloat32m4_t test_vfrdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfrdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -52,7 +52,7 @@ vfloat32m8_t test_vfrdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfrdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -61,7 +61,7 @@ vfloat64m1_t test_vfrdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfrdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m2_t test_vfrdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfrdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -79,7 +79,7 @@ vfloat64m4_t test_vfrdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfrdiv_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsub.c index e407104..e975dae 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsub.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfrsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -16,7 +16,7 @@ vfloat32mf2_t test_vfrsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfrsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -25,7 +25,7 @@ vfloat32m1_t test_vfrsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfrsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -34,7 +34,7 @@ vfloat32m2_t test_vfrsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfrsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ 
-43,7 +43,7 @@ vfloat32m4_t test_vfrsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfrsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -52,7 +52,7 @@ vfloat32m8_t test_vfrsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfrsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -61,7 +61,7 @@ vfloat64m1_t test_vfrsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfrsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m2_t test_vfrsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfrsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -79,7 +79,7 @@ vfloat64m4_t test_vfrsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfrsub_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsgnj.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsgnj.c index e5fa14a..a1e2c0c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsgnj.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsgnj.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnj_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -17,7 +17,7 @@ vfloat32mf2_t test_vfsgnj_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, // CHECK-RV64-LABEL: 
@test_vfsgnj_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnj_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -26,7 +26,7 @@ vfloat32mf2_t test_vfsgnj_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnj_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, @@ -36,7 +36,7 @@ vfloat32m1_t test_vfsgnj_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnj_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -45,7 +45,7 @@ vfloat32m1_t test_vfsgnj_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnj_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, @@ -55,7 +55,7 @@ vfloat32m2_t test_vfsgnj_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnj_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -64,7 +64,7 @@ vfloat32m2_t test_vfsgnj_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnj_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, @@ -74,7 +74,7 @@ vfloat32m4_t test_vfsgnj_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnj_vf_f32m4(vfloat32m4_t 
op1, float op2, size_t vl) { @@ -83,7 +83,7 @@ vfloat32m4_t test_vfsgnj_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnj_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, @@ -93,7 +93,7 @@ vfloat32m8_t test_vfsgnj_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnj_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -102,7 +102,7 @@ vfloat32m8_t test_vfsgnj_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnj_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, @@ -112,7 +112,7 @@ vfloat64m1_t test_vfsgnj_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnj_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -121,7 +121,7 @@ vfloat64m1_t test_vfsgnj_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnj_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, @@ -131,7 +131,7 @@ vfloat64m2_t test_vfsgnj_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnj_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -140,7 +140,7 @@ vfloat64m2_t test_vfsgnj_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsgnj.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnj_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, @@ -150,7 +150,7 @@ vfloat64m4_t test_vfsgnj_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnj_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -159,7 +159,7 @@ vfloat64m4_t test_vfsgnj_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnj_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, @@ -169,7 +169,7 @@ vfloat64m8_t test_vfsgnj_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnj_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { @@ -178,7 +178,7 @@ vfloat64m8_t test_vfsgnj_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjn_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -188,7 +188,7 @@ vfloat32mf2_t test_vfsgnjn_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjn_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -197,7 +197,7 @@ vfloat32mf2_t test_vfsgnjn_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjn_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, @@ -207,7 +207,7 @@ vfloat32m1_t test_vfsgnjn_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m1( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjn_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -216,7 +216,7 @@ vfloat32m1_t test_vfsgnjn_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjn_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, @@ -226,7 +226,7 @@ vfloat32m2_t test_vfsgnjn_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjn_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -235,7 +235,7 @@ vfloat32m2_t test_vfsgnjn_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjn_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, @@ -245,7 +245,7 @@ vfloat32m4_t test_vfsgnjn_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjn_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -254,7 +254,7 @@ vfloat32m4_t test_vfsgnjn_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjn_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, @@ -264,7 +264,7 @@ vfloat32m8_t test_vfsgnjn_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t 
test_vfsgnjn_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -273,7 +273,7 @@ vfloat32m8_t test_vfsgnjn_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjn_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, @@ -283,7 +283,7 @@ vfloat64m1_t test_vfsgnjn_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjn_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -292,7 +292,7 @@ vfloat64m1_t test_vfsgnjn_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjn_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, @@ -302,7 +302,7 @@ vfloat64m2_t test_vfsgnjn_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjn_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -311,7 +311,7 @@ vfloat64m2_t test_vfsgnjn_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjn_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, @@ -321,7 +321,7 @@ vfloat64m4_t test_vfsgnjn_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjn_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m4_t test_vfsgnjn_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjn_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, @@ -340,7 +340,7 @@ vfloat64m8_t test_vfsgnjn_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjn_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { @@ -349,7 +349,7 @@ vfloat64m8_t test_vfsgnjn_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjx_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -359,7 +359,7 @@ vfloat32mf2_t test_vfsgnjx_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjx_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -368,7 +368,7 @@ vfloat32mf2_t test_vfsgnjx_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjx_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, @@ -378,7 +378,7 @@ vfloat32m1_t test_vfsgnjx_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjx_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -387,7 +387,7 @@ vfloat32m1_t test_vfsgnjx_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjx_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, @@ -397,7 +397,7 @@ vfloat32m2_t test_vfsgnjx_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t 
op2, // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjx_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -406,7 +406,7 @@ vfloat32m2_t test_vfsgnjx_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjx_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, @@ -416,7 +416,7 @@ vfloat32m4_t test_vfsgnjx_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjx_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -425,7 +425,7 @@ vfloat32m4_t test_vfsgnjx_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjx_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, @@ -435,7 +435,7 @@ vfloat32m8_t test_vfsgnjx_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjx_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -444,7 +444,7 @@ vfloat32m8_t test_vfsgnjx_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjx_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, @@ -454,7 +454,7 @@ vfloat64m1_t test_vfsgnjx_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjx_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -463,7 +463,7 @@ vfloat64m1_t test_vfsgnjx_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjx_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, @@ -473,7 +473,7 @@ vfloat64m2_t test_vfsgnjx_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjx_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -482,7 +482,7 @@ vfloat64m2_t test_vfsgnjx_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjx_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, @@ -492,7 +492,7 @@ vfloat64m4_t test_vfsgnjx_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjx_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -501,7 +501,7 @@ vfloat64m4_t test_vfsgnjx_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjx_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, @@ -511,7 +511,7 @@ vfloat64m8_t test_vfsgnjx_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjx_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1down.c index e70c63f..550c4e2 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1down.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1down.c
@@ -7,7 +7,7 @@
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv1f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv1f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32mf2_t test_vfslide1down_vf_f32mf2(vfloat32mf2_t src, float value,
@@ -17,7 +17,7 @@ vfloat32mf2_t test_vfslide1down_vf_f32mf2(vfloat32mf2_t src, float value,
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv2f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv2f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m1_t test_vfslide1down_vf_f32m1(vfloat32m1_t src, float value,
@@ -27,7 +27,7 @@ vfloat32m1_t test_vfslide1down_vf_f32m1(vfloat32m1_t src, float value,
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv4f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv4f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m2_t test_vfslide1down_vf_f32m2(vfloat32m2_t src, float value,
@@ -37,7 +37,7 @@ vfloat32m2_t test_vfslide1down_vf_f32m2(vfloat32m2_t src, float value,
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv8f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv8f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m4_t test_vfslide1down_vf_f32m4(vfloat32m4_t src, float value,
@@ -47,7 +47,7 @@ vfloat32m4_t test_vfslide1down_vf_f32m4(vfloat32m4_t src, float value,
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv16f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv16f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m8_t test_vfslide1down_vf_f32m8(vfloat32m8_t src, float value,
@@ -57,7 +57,7 @@ vfloat32m8_t test_vfslide1down_vf_f32m8(vfloat32m8_t src, float value,
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv1f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv1f64.f64.i64( undef, [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m1_t test_vfslide1down_vf_f64m1(vfloat64m1_t src, double value,
@@ -67,7 +67,7 @@ vfloat64m1_t test_vfslide1down_vf_f64m1(vfloat64m1_t src, double value,
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT:
[[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv2f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv2f64.f64.i64( undef, [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfslide1down_vf_f64m2(vfloat64m2_t src, double value, @@ -77,7 +77,7 @@ vfloat64m2_t test_vfslide1down_vf_f64m2(vfloat64m2_t src, double value, // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv4f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv4f64.f64.i64( undef, [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfslide1down_vf_f64m4(vfloat64m4_t src, double value, @@ -87,7 +87,7 @@ vfloat64m4_t test_vfslide1down_vf_f64m4(vfloat64m4_t src, double value, // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv8f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv8f64.f64.i64( undef, [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfslide1down_vf_f64m8(vfloat64m8_t src, double value, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1up.c index 989fd28..794ecc7 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1up.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1up.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv1f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv1f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfslide1up_vf_f32mf2(vfloat32mf2_t src, float value, @@ -17,7 +17,7 @@ vfloat32mf2_t test_vfslide1up_vf_f32mf2(vfloat32mf2_t src, float value, // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv2f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv2f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfslide1up_vf_f32m1(vfloat32m1_t src, float value, @@ -27,7 +27,7 @@ vfloat32m1_t test_vfslide1up_vf_f32m1(vfloat32m1_t src, float value, // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv4f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv4f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfslide1up_vf_f32m2(vfloat32m2_t src, float value, @@ -37,7 +37,7 @@ vfloat32m2_t test_vfslide1up_vf_f32m2(vfloat32m2_t src, float value, // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfslide1up.nxv8f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv8f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfslide1up_vf_f32m4(vfloat32m4_t src, float value, @@ -47,7 +47,7 @@ vfloat32m4_t test_vfslide1up_vf_f32m4(vfloat32m4_t src, float value, // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv16f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv16f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfslide1up_vf_f32m8(vfloat32m8_t src, float value, @@ -57,7 +57,7 @@ vfloat32m8_t test_vfslide1up_vf_f32m8(vfloat32m8_t src, float value, // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv1f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv1f64.f64.i64( undef, [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfslide1up_vf_f64m1(vfloat64m1_t src, double value, @@ -67,7 +67,7 @@ vfloat64m1_t test_vfslide1up_vf_f64m1(vfloat64m1_t src, double value, // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv2f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv2f64.f64.i64( undef, [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfslide1up_vf_f64m2(vfloat64m2_t src, double value, @@ -77,7 +77,7 @@ vfloat64m2_t test_vfslide1up_vf_f64m2(vfloat64m2_t src, double value, // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv4f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv4f64.f64.i64( undef, [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfslide1up_vf_f64m4(vfloat64m4_t src, double value, @@ -87,7 +87,7 @@ vfloat64m4_t test_vfslide1up_vf_f64m4(vfloat64m4_t src, double value, // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv8f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv8f64.f64.i64( undef, [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfslide1up_vf_f64m8(vfloat64m8_t src, double value, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsub.c index 2df9853..5cf2947 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsub.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsub_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -17,7 +17,7 @@ vfloat32mf2_t test_vfsub_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, // CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -26,7 +26,7 @@ vfloat32mf2_t test_vfsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsub_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsub_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, @@ -36,7 +36,7 @@ vfloat32m1_t test_vfsub_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, // CHECK-RV64-LABEL: @test_vfsub_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -45,7 +45,7 @@ vfloat32m1_t test_vfsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsub_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsub_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, @@ -55,7 +55,7 @@ vfloat32m2_t test_vfsub_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, // CHECK-RV64-LABEL: @test_vfsub_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -64,7 +64,7 @@ vfloat32m2_t test_vfsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsub_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsub_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, @@ -74,7 +74,7 @@ vfloat32m4_t test_vfsub_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, // CHECK-RV64-LABEL: @test_vfsub_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsub.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -83,7 +83,7 @@ vfloat32m4_t test_vfsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsub_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsub_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, @@ -93,7 +93,7 @@ vfloat32m8_t test_vfsub_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, // CHECK-RV64-LABEL: @test_vfsub_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -102,7 +102,7 @@ vfloat32m8_t test_vfsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsub_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsub_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, @@ -112,7 +112,7 @@ vfloat64m1_t test_vfsub_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, // CHECK-RV64-LABEL: @test_vfsub_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -121,7 +121,7 @@ vfloat64m1_t test_vfsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsub_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsub_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, @@ -131,7 +131,7 @@ vfloat64m2_t test_vfsub_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, // CHECK-RV64-LABEL: @test_vfsub_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -140,7 +140,7 @@ vfloat64m2_t test_vfsub_vf_f64m2(vfloat64m2_t op1, double op2, 
size_t vl) { // CHECK-RV64-LABEL: @test_vfsub_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsub_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, @@ -150,7 +150,7 @@ vfloat64m4_t test_vfsub_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, // CHECK-RV64-LABEL: @test_vfsub_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -159,7 +159,7 @@ vfloat64m4_t test_vfsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsub_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsub_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, @@ -169,7 +169,7 @@ vfloat64m8_t test_vfsub_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, // CHECK-RV64-LABEL: @test_vfsub_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsub_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c index 2a5bf9f..73d5070 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -17,7 +17,7 @@ vfloat64m1_t test_vfwadd_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) { @@ -26,7 +26,7 @@ vfloat64m1_t test_vfwadd_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwadd.w.nxv1f64.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2, @@ -36,7 +36,7 @@ vfloat64m1_t test_vfwadd_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2, // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) { @@ -45,7 +45,7 @@ vfloat64m1_t test_vfwadd_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, @@ -55,7 +55,7 @@ vfloat64m2_t test_vfwadd_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) { @@ -64,7 +64,7 @@ vfloat64m2_t test_vfwadd_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2, @@ -74,7 +74,7 @@ vfloat64m2_t test_vfwadd_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2, // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) { @@ -83,7 +83,7 @@ vfloat64m2_t test_vfwadd_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, @@ -93,7 +93,7 
@@ vfloat64m4_t test_vfwadd_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) { @@ -102,7 +102,7 @@ vfloat64m4_t test_vfwadd_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2, @@ -112,7 +112,7 @@ vfloat64m4_t test_vfwadd_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2, // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) { @@ -121,7 +121,7 @@ vfloat64m4_t test_vfwadd_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, @@ -131,7 +131,7 @@ vfloat64m8_t test_vfwadd_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) { @@ -140,7 +140,7 @@ vfloat64m8_t test_vfwadd_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2, @@ -150,7 +150,7 @@ vfloat64m8_t test_vfwadd_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2, // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwadd.w.nxv8f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_wf_f64m8(vfloat64m8_t op1, float op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c index 8d5396c..fe0eebf 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmul_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -17,7 +17,7 @@ vfloat64m1_t test_vfwmul_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmul_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) { @@ -26,7 +26,7 @@ vfloat64m1_t test_vfwmul_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmul_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, @@ -36,7 +36,7 @@ vfloat64m2_t test_vfwmul_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmul_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) { @@ -45,7 +45,7 @@ vfloat64m2_t test_vfwmul_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmul_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, @@ -55,7 +55,7 @@ vfloat64m4_t test_vfwmul_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmul_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) { @@ -64,7 +64,7 @@ vfloat64m4_t test_vfwmul_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmul_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, @@ -74,7 +74,7 @@ vfloat64m8_t test_vfwmul_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmul_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c index 2dd4350..4d71261 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwsub_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -17,7 +17,7 @@ vfloat64m1_t test_vfwsub_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwsub_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) { @@ -26,7 +26,7 @@ vfloat64m1_t test_vfwsub_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwsub_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2, @@ -36,7 +36,7 @@ vfloat64m1_t test_vfwsub_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2, // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwsub_wf_f64m1(vfloat64m1_t 
op1, float op2, size_t vl) { @@ -45,7 +45,7 @@ vfloat64m1_t test_vfwsub_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, @@ -55,7 +55,7 @@ vfloat64m2_t test_vfwsub_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) { @@ -64,7 +64,7 @@ vfloat64m2_t test_vfwsub_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2, @@ -74,7 +74,7 @@ vfloat64m2_t test_vfwsub_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2, // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) { @@ -83,7 +83,7 @@ vfloat64m2_t test_vfwsub_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, @@ -93,7 +93,7 @@ vfloat64m4_t test_vfwsub_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) { @@ -102,7 +102,7 @@ vfloat64m4_t test_vfwsub_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2, @@ -112,7 +112,7 @@ vfloat64m4_t test_vfwsub_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2, // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) { @@ -121,7 +121,7 @@ vfloat64m4_t test_vfwsub_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwsub_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, @@ -131,7 +131,7 @@ vfloat64m8_t test_vfwsub_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwsub_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) { @@ -140,7 +140,7 @@ vfloat64m8_t test_vfwsub_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwsub_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2, @@ -150,7 +150,7 @@ vfloat64m8_t test_vfwsub_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2, // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwsub_wf_f64m8(vfloat64m8_t op1, float op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c index ee09488..7d83678 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vmax_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmax_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vmax_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmax_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ vint8mf8_t test_vmax_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmax_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ vint8mf4_t test_vmax_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmax_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ vint8mf4_t test_vmax_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmax_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ vint8mf2_t test_vmax_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmax_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vmax_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmax_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vmax_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmax_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vmax_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmax_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vmax_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmax_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vmax_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmax_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vmax_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmax_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vmax_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmax_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint8m8_t test_vmax_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmax_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint8m8_t test_vmax_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i16.nxv1i16.i64( undef, 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmax_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint16mf4_t test_vmax_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmax_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4_t test_vmax_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmax_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint16mf2_t test_vmax_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmax_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint16mf2_t test_vmax_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmax_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint16m1_t test_vmax_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmax_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint16m1_t test_vmax_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmax_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint16m2_t test_vmax_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i16.i16.i64( 
[[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmax_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint16m2_t test_vmax_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmax_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint16m4_t test_vmax_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmax_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint16m4_t test_vmax_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmax_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint16m8_t test_vmax_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmax_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint16m8_t test_vmax_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmax_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vmax_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmax_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vmax_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: 
@test_vmax_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmax_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vmax_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmax_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vmax_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmax_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ vint32m2_t test_vmax_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmax_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ vint32m2_t test_vmax_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmax_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ vint32m4_t test_vmax_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmax_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ vint32m4_t test_vmax_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmax_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 
@@ vint32m8_t test_vmax_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmax_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ vint32m8_t test_vmax_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmax_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ vint64m1_t test_vmax_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmax_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ vint64m1_t test_vmax_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmax_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ vint64m2_t test_vmax_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmax_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ vint64m2_t test_vmax_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmax_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ vint64m4_t test_vmax_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint64m4_t test_vmax_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ vint64m4_t test_vmax_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmax_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ vint64m8_t test_vmax_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmax_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ vint64m8_t test_vmax_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmaxu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ vuint8mf8_t test_vmaxu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmaxu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ vuint8mf8_t test_vmaxu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmaxu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ vuint8mf4_t test_vmaxu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmaxu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ vuint8mf4_t test_vmaxu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmaxu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmaxu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ vuint8mf2_t test_vmaxu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmaxu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ vuint8mf2_t test_vmaxu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmaxu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ vuint8m1_t test_vmaxu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmaxu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ vuint8m1_t test_vmaxu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmaxu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ vuint8m2_t test_vmaxu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmaxu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ vuint8m2_t test_vmaxu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmaxu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ vuint8m4_t test_vmaxu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmaxu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmaxu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ vuint8m4_t test_vmaxu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmaxu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ vuint8m8_t test_vmaxu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmaxu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ vuint8m8_t test_vmaxu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmaxu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -537,7 +537,7 @@ vuint16mf4_t test_vmaxu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmaxu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -546,7 +546,7 @@ vuint16mf4_t test_vmaxu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmaxu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -555,7 +555,7 @@ vuint16mf2_t test_vmaxu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmaxu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -564,7 +564,7 @@ vuint16mf2_t 
test_vmaxu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmaxu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -573,7 +573,7 @@ vuint16m1_t test_vmaxu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmaxu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -582,7 +582,7 @@ vuint16m1_t test_vmaxu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmaxu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -591,7 +591,7 @@ vuint16m2_t test_vmaxu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmaxu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -600,7 +600,7 @@ vuint16m2_t test_vmaxu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmaxu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -609,7 +609,7 @@ vuint16m4_t test_vmaxu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmaxu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -618,7 +618,7 @@ vuint16m4_t test_vmaxu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i16.nxv32i16.i64( undef, 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmaxu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -627,7 +627,7 @@ vuint16m8_t test_vmaxu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmaxu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -636,7 +636,7 @@ vuint16m8_t test_vmaxu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmaxu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -645,7 +645,7 @@ vuint32mf2_t test_vmaxu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmaxu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -654,7 +654,7 @@ vuint32mf2_t test_vmaxu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmaxu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -663,7 +663,7 @@ vuint32m1_t test_vmaxu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmaxu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -672,7 +672,7 @@ vuint32m1_t test_vmaxu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmaxu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -681,7 +681,7 @@ vuint32m2_t test_vmaxu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmaxu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2_t test_vmaxu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmaxu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -699,7 +699,7 @@ vuint32m4_t test_vmaxu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmaxu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -708,7 +708,7 @@ vuint32m4_t test_vmaxu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmaxu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -717,7 +717,7 @@ vuint32m8_t test_vmaxu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmaxu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -726,7 +726,7 @@ vuint32m8_t test_vmaxu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmaxu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -735,7 +735,7 @@ vuint64m1_t test_vmaxu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmaxu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -744,7 
+744,7 @@ vuint64m1_t test_vmaxu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmaxu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -753,7 +753,7 @@ vuint64m2_t test_vmaxu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmaxu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -762,7 +762,7 @@ vuint64m2_t test_vmaxu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmaxu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -771,7 +771,7 @@ vuint64m4_t test_vmaxu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmaxu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -780,7 +780,7 @@ vuint64m4_t test_vmaxu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmaxu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -789,7 +789,7 @@ vuint64m8_t test_vmaxu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmaxu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c index 5c269c1..0c6bf71 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vmin_vv_i8mf8( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmin_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vmin_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmin_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ vint8mf8_t test_vmin_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmin_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ vint8mf4_t test_vmin_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmin_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ vint8mf4_t test_vmin_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmin_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ vint8mf2_t test_vmin_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmin_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vmin_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmin_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vmin_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: 
@test_vmin_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmin_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vmin_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmin_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vmin_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmin_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vmin_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmin_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vmin_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmin_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vmin_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmin_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint8m8_t test_vmin_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmin_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint8m8_t test_vmin_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // 
CHECK-RV64-LABEL: @test_vmin_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmin_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint16mf4_t test_vmin_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmin_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4_t test_vmin_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmin_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint16mf2_t test_vmin_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmin_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint16mf2_t test_vmin_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmin_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint16m1_t test_vmin_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmin_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint16m1_t test_vmin_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmin_vv_i16m2(vint16m2_t op1, 
vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint16m2_t test_vmin_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmin_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint16m2_t test_vmin_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmin_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint16m4_t test_vmin_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmin_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint16m4_t test_vmin_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmin_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint16m8_t test_vmin_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmin_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint16m8_t test_vmin_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmin_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vmin_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i32.i32.i64( undef, 
[[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmin_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vmin_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmin_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vmin_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmin_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vmin_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmin_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ vint32m2_t test_vmin_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmin_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ vint32m2_t test_vmin_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmin_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ vint32m4_t test_vmin_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmin_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ vint32m4_t test_vmin_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmin_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ vint32m8_t test_vmin_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmin_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ vint32m8_t test_vmin_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmin_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ vint64m1_t test_vmin_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmin_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ vint64m1_t test_vmin_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmin_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ vint64m2_t test_vmin_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmin_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ vint64m2_t test_vmin_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmin_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ vint64m4_t test_vmin_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i64m4( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmin_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ vint64m4_t test_vmin_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmin_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ vint64m8_t test_vmin_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmin_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ vint64m8_t test_vmin_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vminu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ vuint8mf8_t test_vminu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vminu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ vuint8mf8_t test_vminu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vminu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ vuint8mf4_t test_vminu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vminu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ vuint8mf4_t 
test_vminu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vminu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ vuint8mf2_t test_vminu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vminu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ vuint8mf2_t test_vminu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vminu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ vuint8m1_t test_vminu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vminu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ vuint8m1_t test_vminu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vminu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ vuint8m2_t test_vminu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vminu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ vuint8m2_t test_vminu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint8m4_t test_vminu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ vuint8m4_t test_vminu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vminu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ vuint8m4_t test_vminu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vminu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ vuint8m8_t test_vminu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vminu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ vuint8m8_t test_vminu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vminu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -537,7 +537,7 @@ vuint16mf4_t test_vminu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vminu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vminu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -546,7 +546,7 @@ vuint16mf4_t test_vminu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vminu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -555,7 +555,7 @@ vuint16mf2_t test_vminu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vminu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vminu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -564,7 +564,7 @@ vuint16mf2_t test_vminu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vminu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -573,7 +573,7 @@ vuint16m1_t test_vminu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vminu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -582,7 +582,7 @@ vuint16m1_t test_vminu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vminu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -591,7 +591,7 @@ vuint16m2_t test_vminu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vminu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -600,7 +600,7 @@ vuint16m2_t test_vminu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vminu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -609,7 +609,7 @@ vuint16m4_t test_vminu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vminu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -618,7 +618,7 @@ vuint16m4_t test_vminu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // 
CHECK-RV64-LABEL: @test_vminu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vminu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -627,7 +627,7 @@ vuint16m8_t test_vminu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vminu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -636,7 +636,7 @@ vuint16m8_t test_vminu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vminu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -645,7 +645,7 @@ vuint32mf2_t test_vminu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vminu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vminu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -654,7 +654,7 @@ vuint32mf2_t test_vminu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vminu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -663,7 +663,7 @@ vuint32m1_t test_vminu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vminu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -672,7 +672,7 @@ vuint32m1_t test_vminu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint32m2_t test_vminu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -681,7 +681,7 @@ vuint32m2_t test_vminu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vminu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2_t test_vminu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vminu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -699,7 +699,7 @@ vuint32m4_t test_vminu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vminu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -708,7 +708,7 @@ vuint32m4_t test_vminu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vminu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -717,7 +717,7 @@ vuint32m8_t test_vminu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vminu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -726,7 +726,7 @@ vuint32m8_t test_vminu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vminu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -735,7 +735,7 @@ vuint64m1_t test_vminu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i64.i64.i64( [[OP1:%.*]], i64 
[[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vminu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -744,7 +744,7 @@ vuint64m1_t test_vminu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vminu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -753,7 +753,7 @@ vuint64m2_t test_vminu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vminu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -762,7 +762,7 @@ vuint64m2_t test_vminu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vminu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -771,7 +771,7 @@ vuint64m4_t test_vminu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vminu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -780,7 +780,7 @@ vuint64m4_t test_vminu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vminu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -789,7 +789,7 @@ vuint64m8_t test_vminu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vminu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul-eew64.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul-eew64.c index a69e943..7226993 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul-eew64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul-eew64.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulh_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -18,7 +18,7 @@ vint64m1_t test_vmulh_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulh_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -27,7 +27,7 @@ vint64m1_t test_vmulh_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulh_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -36,7 +36,7 @@ vint64m2_t test_vmulh_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulh_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -45,7 +45,7 @@ vint64m2_t test_vmulh_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulh_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -54,7 +54,7 @@ vint64m4_t test_vmulh_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulh_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -63,7 +63,7 @@ vint64m4_t test_vmulh_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i64.nxv8i64.i64( 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulh_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -72,7 +72,7 @@ vint64m8_t test_vmulh_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulh_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -81,7 +81,7 @@ vint64m8_t test_vmulh_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmulhu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -90,7 +90,7 @@ vuint64m1_t test_vmulhu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmulhu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -99,7 +99,7 @@ vuint64m1_t test_vmulhu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmulhu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -108,7 +108,7 @@ vuint64m2_t test_vmulhu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmulhu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -117,7 +117,7 @@ vuint64m2_t test_vmulhu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmulhu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -126,7 +126,7 @@ vuint64m4_t test_vmulhu_vv_u64m4(vuint64m4_t op1, 
vuint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmulhu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -135,7 +135,7 @@ vuint64m4_t test_vmulhu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmulhu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -144,7 +144,7 @@ vuint64m8_t test_vmulhu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmulhu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { @@ -153,7 +153,7 @@ vuint64m8_t test_vmulhu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulhsu_vv_i64m1(vint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -162,7 +162,7 @@ vint64m1_t test_vmulhsu_vv_i64m1(vint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulhsu_vx_i64m1(vint64m1_t op1, uint64_t op2, size_t vl) { @@ -171,7 +171,7 @@ vint64m1_t test_vmulhsu_vx_i64m1(vint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulhsu_vv_i64m2(vint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -180,7 +180,7 @@ vint64m2_t test_vmulhsu_vv_i64m2(vint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulhsu_vx_i64m2(vint64m2_t op1, uint64_t op2, size_t vl) { @@ -189,7 +189,7 @@ vint64m2_t test_vmulhsu_vx_i64m2(vint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulhsu_vv_i64m4(vint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -198,7 +198,7 @@ vint64m4_t test_vmulhsu_vv_i64m4(vint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulhsu_vx_i64m4(vint64m4_t op1, uint64_t op2, size_t vl) { @@ -207,7 +207,7 @@ vint64m4_t test_vmulhsu_vx_i64m4(vint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulhsu_vv_i64m8(vint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -216,7 +216,7 @@ vint64m8_t test_vmulhsu_vv_i64m8(vint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulhsu_vx_i64m8(vint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c index b08b304..a93c1d9 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ 
vint8mf8_t test_vmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ vint8mf4_t test_vmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ vint8mf4_t test_vmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ vint8mf2_t test_vmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) 
{ @@ -87,7 +87,7 @@ vint8m2_t test_vmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint8m8_t test_vmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint8m8_t test_vmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint16mf4_t test_vmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t 
test_vmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4_t test_vmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint16mf2_t test_vmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint16mf2_t test_vmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint16m1_t test_vmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint16m1_t test_vmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint16m2_t test_vmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint16m2_t test_vmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmul.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint16m4_t test_vmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint16m4_t test_vmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint16m8_t test_vmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint16m8_t test_vmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmul.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ vint32m2_t test_vmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ vint32m2_t test_vmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ vint32m4_t test_vmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ vint32m4_t test_vmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ vint32m8_t test_vmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ vint32m8_t test_vmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // 
CHECK-RV64-LABEL: @test_vmul_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ vint64m1_t test_vmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ vint64m1_t test_vmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ vint64m2_t test_vmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ vint64m2_t test_vmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ vint64m4_t test_vmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ vint64m4_t test_vmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ 
-393,7 +393,7 @@ vint64m8_t test_vmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ vint64m8_t test_vmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmul_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ vuint8mf8_t test_vmul_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmul_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ vuint8mf8_t test_vmul_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmul_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ vuint8mf4_t test_vmul_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmul_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ vuint8mf4_t test_vmul_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmul_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ vuint8mf2_t test_vmul_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret [[TMP0]] // vuint8mf2_t test_vmul_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ vuint8mf2_t test_vmul_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmul_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ vuint8m1_t test_vmul_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmul_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ vuint8m1_t test_vmul_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmul_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ vuint8m2_t test_vmul_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmul_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ vuint8m2_t test_vmul_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmul_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ vuint8m4_t test_vmul_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmul_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ vuint8m4_t test_vmul_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.nxv64i8.i64( undef, 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmul_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ vuint8m8_t test_vmul_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmul_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ vuint8m8_t test_vmul_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmul_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -537,7 +537,7 @@ vuint16mf4_t test_vmul_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vmul_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmul_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -546,7 +546,7 @@ vuint16mf4_t test_vmul_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmul_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -555,7 +555,7 @@ vuint16mf2_t test_vmul_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vmul_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmul_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -564,7 +564,7 @@ vuint16mf2_t test_vmul_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmul_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -573,7 +573,7 @@ vuint16m1_t test_vmul_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmul.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmul_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -582,7 +582,7 @@ vuint16m1_t test_vmul_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmul_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -591,7 +591,7 @@ vuint16m2_t test_vmul_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmul_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -600,7 +600,7 @@ vuint16m2_t test_vmul_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmul_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -609,7 +609,7 @@ vuint16m4_t test_vmul_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmul_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -618,7 +618,7 @@ vuint16m4_t test_vmul_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmul_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -627,7 +627,7 @@ vuint16m8_t test_vmul_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmul_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -636,7 +636,7 @@ vuint16m8_t 
test_vmul_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmul_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -645,7 +645,7 @@ vuint32mf2_t test_vmul_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vmul_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmul_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -654,7 +654,7 @@ vuint32mf2_t test_vmul_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmul_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -663,7 +663,7 @@ vuint32m1_t test_vmul_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmul_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -672,7 +672,7 @@ vuint32m1_t test_vmul_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmul_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -681,7 +681,7 @@ vuint32m2_t test_vmul_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmul_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2_t test_vmul_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmul_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -699,7 +699,7 @@ vuint32m4_t test_vmul_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmul_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -708,7 +708,7 @@ vuint32m4_t test_vmul_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmul_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -717,7 +717,7 @@ vuint32m8_t test_vmul_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmul_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -726,7 +726,7 @@ vuint32m8_t test_vmul_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmul_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -735,7 +735,7 @@ vuint64m1_t test_vmul_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmul_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -744,7 +744,7 @@ vuint64m1_t test_vmul_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmul_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -753,7 +753,7 @@ vuint64m2_t test_vmul_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.i64.i64( [[OP1:%.*]], i64 
[[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmul_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -762,7 +762,7 @@ vuint64m2_t test_vmul_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmul_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -771,7 +771,7 @@ vuint64m4_t test_vmul_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmul_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -780,7 +780,7 @@ vuint64m4_t test_vmul_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmul_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -789,7 +789,7 @@ vuint64m8_t test_vmul_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmul_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { @@ -798,7 +798,7 @@ vuint64m8_t test_vmul_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulh_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -807,7 +807,7 @@ vint8mf8_t test_vmulh_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulh_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -816,7 +816,7 @@ vint8mf8_t test_vmulh_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulh_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -825,7 +825,7 @@ vint8mf4_t test_vmulh_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulh_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -834,7 +834,7 @@ vint8mf4_t test_vmulh_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulh_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -843,7 +843,7 @@ vint8mf2_t test_vmulh_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulh_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -852,7 +852,7 @@ vint8mf2_t test_vmulh_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulh_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -861,7 +861,7 @@ vint8m1_t test_vmulh_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulh_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -870,7 +870,7 @@ vint8m1_t test_vmulh_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulh_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -879,7 +879,7 @@ vint8m2_t test_vmulh_vv_i8m2(vint8m2_t op1, 
vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulh_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -888,7 +888,7 @@ vint8m2_t test_vmulh_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulh_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -897,7 +897,7 @@ vint8m4_t test_vmulh_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulh_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -906,7 +906,7 @@ vint8m4_t test_vmulh_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulh_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -915,7 +915,7 @@ vint8m8_t test_vmulh_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulh_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -924,7 +924,7 @@ vint8m8_t test_vmulh_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulh_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -933,7 +933,7 @@ vint16mf4_t test_vmulh_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulh_vx_i16mf4(vint16mf4_t op1, int16_t 
op2, size_t vl) { @@ -942,7 +942,7 @@ vint16mf4_t test_vmulh_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulh_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -951,7 +951,7 @@ vint16mf2_t test_vmulh_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulh_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -960,7 +960,7 @@ vint16mf2_t test_vmulh_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulh_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -969,7 +969,7 @@ vint16m1_t test_vmulh_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulh_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -978,7 +978,7 @@ vint16m1_t test_vmulh_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulh_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -987,7 +987,7 @@ vint16m2_t test_vmulh_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulh_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -996,7 +996,7 @@ vint16m2_t test_vmulh_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmulh.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulh_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -1005,7 +1005,7 @@ vint16m4_t test_vmulh_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulh_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -1014,7 +1014,7 @@ vint16m4_t test_vmulh_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulh_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -1023,7 +1023,7 @@ vint16m8_t test_vmulh_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulh_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -1032,7 +1032,7 @@ vint16m8_t test_vmulh_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulh_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -1041,7 +1041,7 @@ vint32mf2_t test_vmulh_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulh_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -1050,7 +1050,7 @@ vint32mf2_t test_vmulh_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulh_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -1059,7 +1059,7 @@ vint32m1_t test_vmulh_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i32m1( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulh_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -1068,7 +1068,7 @@ vint32m1_t test_vmulh_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulh_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -1077,7 +1077,7 @@ vint32m2_t test_vmulh_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulh_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -1086,7 +1086,7 @@ vint32m2_t test_vmulh_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulh_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -1095,7 +1095,7 @@ vint32m4_t test_vmulh_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulh_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -1104,7 +1104,7 @@ vint32m4_t test_vmulh_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulh_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -1113,7 +1113,7 @@ vint32m8_t test_vmulh_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulh_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -1122,7 
+1122,7 @@ vint32m8_t test_vmulh_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmulhu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1131,7 +1131,7 @@ vuint8mf8_t test_vmulhu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmulhu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -1140,7 +1140,7 @@ vuint8mf8_t test_vmulhu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmulhu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -1149,7 +1149,7 @@ vuint8mf4_t test_vmulhu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmulhu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -1158,7 +1158,7 @@ vuint8mf4_t test_vmulhu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmulhu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -1167,7 +1167,7 @@ vuint8mf2_t test_vmulhu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmulhu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -1176,7 +1176,7 @@ vuint8mf2_t test_vmulhu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i8.nxv8i8.i64( 
undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmulhu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -1185,7 +1185,7 @@ vuint8m1_t test_vmulhu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmulhu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -1194,7 +1194,7 @@ vuint8m1_t test_vmulhu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmulhu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -1203,7 +1203,7 @@ vuint8m2_t test_vmulhu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmulhu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -1212,7 +1212,7 @@ vuint8m2_t test_vmulhu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmulhu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -1221,7 +1221,7 @@ vuint8m4_t test_vmulhu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmulhu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -1230,7 +1230,7 @@ vuint8m4_t test_vmulhu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmulhu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -1239,7 +1239,7 @@ vuint8m8_t test_vmulhu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmulhu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmulhu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -1248,7 +1248,7 @@ vuint8m8_t test_vmulhu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmulhu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -1257,7 +1257,7 @@ vuint16mf4_t test_vmulhu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmulhu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -1266,7 +1266,7 @@ vuint16mf4_t test_vmulhu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmulhu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -1275,7 +1275,7 @@ vuint16mf2_t test_vmulhu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmulhu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -1284,7 +1284,7 @@ vuint16mf2_t test_vmulhu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmulhu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -1293,7 +1293,7 @@ vuint16m1_t test_vmulhu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmulhu_vx_u16m1(vuint16m1_t op1, uint16_t 
op2, size_t vl) { @@ -1302,7 +1302,7 @@ vuint16m1_t test_vmulhu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmulhu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -1311,7 +1311,7 @@ vuint16m2_t test_vmulhu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmulhu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -1320,7 +1320,7 @@ vuint16m2_t test_vmulhu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmulhu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -1329,7 +1329,7 @@ vuint16m4_t test_vmulhu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmulhu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -1338,7 +1338,7 @@ vuint16m4_t test_vmulhu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmulhu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -1347,7 +1347,7 @@ vuint16m8_t test_vmulhu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmulhu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -1356,7 +1356,7 @@ vuint16m8_t test_vmulhu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmulhu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -1365,7 +1365,7 @@ vuint32mf2_t test_vmulhu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl // CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmulhu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -1374,7 +1374,7 @@ vuint32mf2_t test_vmulhu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmulhu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -1383,7 +1383,7 @@ vuint32m1_t test_vmulhu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmulhu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -1392,7 +1392,7 @@ vuint32m1_t test_vmulhu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmulhu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1401,7 +1401,7 @@ vuint32m2_t test_vmulhu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmulhu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -1410,7 +1410,7 @@ vuint32m2_t test_vmulhu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmulhu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1419,7 +1419,7 @@ vuint32m4_t 
test_vmulhu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmulhu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -1428,7 +1428,7 @@ vuint32m4_t test_vmulhu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmulhu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -1437,7 +1437,7 @@ vuint32m8_t test_vmulhu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmulhu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -1446,7 +1446,7 @@ vuint32m8_t test_vmulhu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulhsu_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1455,7 +1455,7 @@ vint8mf8_t test_vmulhsu_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulhsu_vx_i8mf8(vint8mf8_t op1, uint8_t op2, size_t vl) { @@ -1464,7 +1464,7 @@ vint8mf8_t test_vmulhsu_vx_i8mf8(vint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulhsu_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -1473,7 +1473,7 @@ vint8mf4_t test_vmulhsu_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmulhsu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulhsu_vx_i8mf4(vint8mf4_t op1, uint8_t op2, size_t vl) { @@ -1482,7 +1482,7 @@ vint8mf4_t test_vmulhsu_vx_i8mf4(vint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulhsu_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -1491,7 +1491,7 @@ vint8mf2_t test_vmulhsu_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulhsu_vx_i8mf2(vint8mf2_t op1, uint8_t op2, size_t vl) { @@ -1500,7 +1500,7 @@ vint8mf2_t test_vmulhsu_vx_i8mf2(vint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulhsu_vv_i8m1(vint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -1509,7 +1509,7 @@ vint8m1_t test_vmulhsu_vv_i8m1(vint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulhsu_vx_i8m1(vint8m1_t op1, uint8_t op2, size_t vl) { @@ -1518,7 +1518,7 @@ vint8m1_t test_vmulhsu_vx_i8m1(vint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulhsu_vv_i8m2(vint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -1527,7 +1527,7 @@ vint8m2_t test_vmulhsu_vv_i8m2(vint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulhsu_vx_i8m2(vint8m2_t op1, uint8_t op2, size_t vl) { @@ -1536,7 +1536,7 @@ vint8m2_t test_vmulhsu_vx_i8m2(vint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulhsu_vv_i8m4(vint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -1545,7 +1545,7 @@ vint8m4_t test_vmulhsu_vv_i8m4(vint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulhsu_vx_i8m4(vint8m4_t op1, uint8_t op2, size_t vl) { @@ -1554,7 +1554,7 @@ vint8m4_t test_vmulhsu_vx_i8m4(vint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulhsu_vv_i8m8(vint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -1563,7 +1563,7 @@ vint8m8_t test_vmulhsu_vv_i8m8(vint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulhsu_vx_i8m8(vint8m8_t op1, uint8_t op2, size_t vl) { @@ -1572,7 +1572,7 @@ vint8m8_t test_vmulhsu_vx_i8m8(vint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulhsu_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -1581,7 +1581,7 @@ vint16mf4_t test_vmulhsu_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulhsu_vx_i16mf4(vint16mf4_t op1, uint16_t op2, size_t vl) { @@ -1590,7 +1590,7 @@ vint16mf4_t test_vmulhsu_vx_i16mf4(vint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulhsu_vv_i16mf2(vint16mf2_t op1, 
vuint16mf2_t op2, size_t vl) { @@ -1599,7 +1599,7 @@ vint16mf2_t test_vmulhsu_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulhsu_vx_i16mf2(vint16mf2_t op1, uint16_t op2, size_t vl) { @@ -1608,7 +1608,7 @@ vint16mf2_t test_vmulhsu_vx_i16mf2(vint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulhsu_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -1617,7 +1617,7 @@ vint16m1_t test_vmulhsu_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulhsu_vx_i16m1(vint16m1_t op1, uint16_t op2, size_t vl) { @@ -1626,7 +1626,7 @@ vint16m1_t test_vmulhsu_vx_i16m1(vint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulhsu_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -1635,7 +1635,7 @@ vint16m2_t test_vmulhsu_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulhsu_vx_i16m2(vint16m2_t op1, uint16_t op2, size_t vl) { @@ -1644,7 +1644,7 @@ vint16m2_t test_vmulhsu_vx_i16m2(vint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulhsu_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -1653,7 +1653,7 @@ vint16m4_t test_vmulhsu_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i16.i16.i64( [[OP1:%.*]], i16 
[[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulhsu_vx_i16m4(vint16m4_t op1, uint16_t op2, size_t vl) { @@ -1662,7 +1662,7 @@ vint16m4_t test_vmulhsu_vx_i16m4(vint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulhsu_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -1671,7 +1671,7 @@ vint16m8_t test_vmulhsu_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulhsu_vx_i16m8(vint16m8_t op1, uint16_t op2, size_t vl) { @@ -1680,7 +1680,7 @@ vint16m8_t test_vmulhsu_vx_i16m8(vint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulhsu_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -1689,7 +1689,7 @@ vint32mf2_t test_vmulhsu_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulhsu_vx_i32mf2(vint32mf2_t op1, uint32_t op2, size_t vl) { @@ -1698,7 +1698,7 @@ vint32mf2_t test_vmulhsu_vx_i32mf2(vint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulhsu_vv_i32m1(vint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -1707,7 +1707,7 @@ vint32m1_t test_vmulhsu_vv_i32m1(vint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulhsu_vx_i32m1(vint32m1_t op1, uint32_t op2, size_t vl) { @@ -1716,7 +1716,7 @@ 
vint32m1_t test_vmulhsu_vx_i32m1(vint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulhsu_vv_i32m2(vint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1725,7 +1725,7 @@ vint32m2_t test_vmulhsu_vv_i32m2(vint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulhsu_vx_i32m2(vint32m2_t op1, uint32_t op2, size_t vl) { @@ -1734,7 +1734,7 @@ vint32m2_t test_vmulhsu_vx_i32m2(vint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulhsu_vv_i32m4(vint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1743,7 +1743,7 @@ vint32m4_t test_vmulhsu_vv_i32m4(vint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulhsu_vx_i32m4(vint32m4_t op1, uint32_t op2, size_t vl) { @@ -1752,7 +1752,7 @@ vint32m4_t test_vmulhsu_vx_i32m4(vint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulhsu_vv_i32m8(vint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -1761,7 +1761,7 @@ vint32m8_t test_vmulhsu_vv_i32m8(vint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulhsu_vx_i32m8(vint32m8_t op1, uint32_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c index 29ccbe1..bd2af8f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c @@ -6,7 +6,7 @@ // 
CHECK-RV64-LABEL: @test_vnclip_wv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnclip_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vnclip_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnclip_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) { @@ -24,7 +24,7 @@ vint8mf8_t test_vnclip_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnclip_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { @@ -33,7 +33,7 @@ vint8mf4_t test_vnclip_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i8.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i8.nxv2i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnclip_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) { @@ -42,7 +42,7 @@ vint8mf4_t test_vnclip_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnclip_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) { @@ -51,7 +51,7 @@ vint8mf2_t test_vnclip_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnclip_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vnclip_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnclip_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vnclip_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnclip_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vnclip_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnclip_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vnclip_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnclip_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vnclip_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnclip_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vnclip_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnclip_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vnclip_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnclip_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, @@ -124,7 +124,7 @@ vint16mf4_t 
test_vnclip_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, // CHECK-RV64-LABEL: @test_vnclip_wx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnclip_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) { @@ -133,7 +133,7 @@ vint16mf4_t test_vnclip_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnclip_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, @@ -143,7 +143,7 @@ vint16mf2_t test_vnclip_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, // CHECK-RV64-LABEL: @test_vnclip_wx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnclip_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) { @@ -152,7 +152,7 @@ vint16mf2_t test_vnclip_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnclip_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) { @@ -161,7 +161,7 @@ vint16m1_t test_vnclip_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnclip_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) { @@ -170,7 +170,7 @@ vint16m1_t test_vnclip_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnclip_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) { @@ -179,7 +179,7 @@ vint16m2_t test_vnclip_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.i64.i64( [[OP1:%.*]], i64 
[[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnclip_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) { @@ -188,7 +188,7 @@ vint16m2_t test_vnclip_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnclip_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) { @@ -197,7 +197,7 @@ vint16m4_t test_vnclip_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnclip_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) { @@ -206,7 +206,7 @@ vint16m4_t test_vnclip_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnclip_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, @@ -216,7 +216,7 @@ vint32mf2_t test_vnclip_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, // CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnclip_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) { @@ -225,7 +225,7 @@ vint32mf2_t test_vnclip_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnclip_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) { @@ -234,7 +234,7 @@ vint32m1_t test_vnclip_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t 
test_vnclip_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) { @@ -243,7 +243,7 @@ vint32m1_t test_vnclip_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnclip_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) { @@ -252,7 +252,7 @@ vint32m2_t test_vnclip_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnclip_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) { @@ -261,7 +261,7 @@ vint32m2_t test_vnclip_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnclip_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) { @@ -270,7 +270,7 @@ vint32m4_t test_vnclip_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnclip_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) { @@ -279,7 +279,7 @@ vint32m4_t test_vnclip_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnclipu_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, @@ -289,7 +289,7 @@ vuint8mf8_t test_vnclipu_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnclipu_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) { @@ -298,7 +298,7 @@ vuint8mf8_t test_vnclipu_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf4( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnclipu_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, @@ -308,7 +308,7 @@ vuint8mf4_t test_vnclipu_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnclipu_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) { @@ -317,7 +317,7 @@ vuint8mf4_t test_vnclipu_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnclipu_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, @@ -327,7 +327,7 @@ vuint8mf2_t test_vnclipu_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnclipu_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) { @@ -336,7 +336,7 @@ vuint8mf2_t test_vnclipu_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnclipu_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) { @@ -345,7 +345,7 @@ vuint8m1_t test_vnclipu_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnclipu_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) { @@ -354,7 +354,7 @@ vuint8m1_t test_vnclipu_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8.i64( undef, [[OP1:%.*]], 
[[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnclipu_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) { @@ -363,7 +363,7 @@ vuint8m2_t test_vnclipu_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnclipu_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) { @@ -372,7 +372,7 @@ vuint8m2_t test_vnclipu_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnclipu_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) { @@ -381,7 +381,7 @@ vuint8m4_t test_vnclipu_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnclipu_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) { @@ -390,7 +390,7 @@ vuint8m4_t test_vnclipu_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnclipu_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, @@ -400,7 +400,7 @@ vuint16mf4_t test_vnclipu_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnclipu_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) { @@ -409,7 +409,7 @@ vuint16mf4_t test_vnclipu_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnclipu_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, @@ -419,7 +419,7 @@ vuint16mf2_t 
test_vnclipu_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnclipu_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) { @@ -428,7 +428,7 @@ vuint16mf2_t test_vnclipu_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnclipu_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift, @@ -438,7 +438,7 @@ vuint16m1_t test_vnclipu_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift, // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnclipu_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) { @@ -447,7 +447,7 @@ vuint16m1_t test_vnclipu_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnclipu_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, @@ -457,7 +457,7 @@ vuint16m2_t test_vnclipu_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnclipu_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) { @@ -466,7 +466,7 @@ vuint16m2_t test_vnclipu_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnclipu_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, @@ -476,7 +476,7 @@ vuint16m4_t test_vnclipu_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i64.i64( [[OP1:%.*]], i64 
[[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnclipu_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) { @@ -485,7 +485,7 @@ vuint16m4_t test_vnclipu_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnclipu_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, @@ -495,7 +495,7 @@ vuint32mf2_t test_vnclipu_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, // CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnclipu_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) { @@ -504,7 +504,7 @@ vuint32mf2_t test_vnclipu_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnclipu_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, @@ -514,7 +514,7 @@ vuint32m1_t test_vnclipu_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnclipu_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) { @@ -523,7 +523,7 @@ vuint32m1_t test_vnclipu_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnclipu_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift, @@ -533,7 +533,7 @@ vuint32m2_t test_vnclipu_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift, // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t 
test_vnclipu_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) { @@ -542,7 +542,7 @@ vuint32m2_t test_vnclipu_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnclipu_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift, @@ -552,7 +552,7 @@ vuint32m4_t test_vnclipu_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift, // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnclipu_wx_u32m4(vuint64m8_t op1, size_t shift, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vncvt.c index d25b1b4..104a574 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vncvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vncvt.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vncvt_x_x_w_i8mf8 (vint16mf4_t src, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vncvt_x_x_w_i8mf8 (vint16mf4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vncvt_x_x_w_i8mf4 (vint16mf2_t src, size_t vl) { @@ -24,7 +24,7 @@ vint8mf4_t test_vncvt_x_x_w_i8mf4 (vint16mf2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vncvt_x_x_w_i8mf2 (vint16m1_t src, size_t vl) { @@ -33,7 +33,7 @@ vint8mf2_t test_vncvt_x_x_w_i8mf2 (vint16m1_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vncvt_x_x_w_i8m1 (vint16m2_t src, size_t vl) { @@ -42,7 +42,7 @@ vint8m1_t test_vncvt_x_x_w_i8m1 (vint16m2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m2( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vncvt_x_x_w_i8m2 (vint16m4_t src, size_t vl) { @@ -51,7 +51,7 @@ vint8m2_t test_vncvt_x_x_w_i8m2 (vint16m4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vncvt_x_x_w_i8m4 (vint16m8_t src, size_t vl) { @@ -60,7 +60,7 @@ vint8m4_t test_vncvt_x_x_w_i8m4 (vint16m8_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vncvt_x_x_w_u8mf8 (vuint16mf4_t src, size_t vl) { @@ -69,7 +69,7 @@ vuint8mf8_t test_vncvt_x_x_w_u8mf8 (vuint16mf4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vncvt_x_x_w_u8mf4 (vuint16mf2_t src, size_t vl) { @@ -78,7 +78,7 @@ vuint8mf4_t test_vncvt_x_x_w_u8mf4 (vuint16mf2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vncvt_x_x_w_u8mf2 (vuint16m1_t src, size_t vl) { @@ -87,7 +87,7 @@ vuint8mf2_t test_vncvt_x_x_w_u8mf2 (vuint16m1_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vncvt_x_x_w_u8m1 (vuint16m2_t src, size_t vl) { @@ -96,7 +96,7 @@ vuint8m1_t test_vncvt_x_x_w_u8m1 (vuint16m2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vncvt_x_x_w_u8m2 (vuint16m4_t src, size_t vl) { @@ -105,7 +105,7 @@ vuint8m2_t test_vncvt_x_x_w_u8m2 (vuint16m4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vncvt_x_x_w_u8m4 (vuint16m8_t src, size_t vl) { @@ -114,7 +114,7 @@ vuint8m4_t test_vncvt_x_x_w_u8m4 (vuint16m8_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vncvt_x_x_w_i16mf4 (vint32mf2_t src, size_t vl) { @@ -123,7 +123,7 @@ vint16mf4_t test_vncvt_x_x_w_i16mf4 (vint32mf2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vncvt_x_x_w_i16mf2 (vint32m1_t src, size_t vl) { @@ -132,7 +132,7 @@ vint16mf2_t test_vncvt_x_x_w_i16mf2 (vint32m1_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vncvt_x_x_w_i16m1 (vint32m2_t src, size_t vl) { @@ -141,7 +141,7 @@ vint16m1_t test_vncvt_x_x_w_i16m1 (vint32m2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vncvt_x_x_w_i16m2 (vint32m4_t src, size_t vl) { @@ -150,7 +150,7 @@ vint16m2_t test_vncvt_x_x_w_i16m2 (vint32m4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vncvt_x_x_w_i16m4 (vint32m8_t src, size_t vl) { @@ -159,7 +159,7 @@ vint16m4_t test_vncvt_x_x_w_i16m4 (vint32m8_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vncvt_x_x_w_u16mf4 (vuint32mf2_t src, size_t vl) { @@ -168,7 +168,7 @@ vuint16mf4_t test_vncvt_x_x_w_u16mf4 (vuint32mf2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vncvt_x_x_w_u16mf2 (vuint32m1_t src, size_t vl) { @@ -177,7 +177,7 @@ vuint16mf2_t test_vncvt_x_x_w_u16mf2 (vuint32m1_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vncvt_x_x_w_u16m1 (vuint32m2_t src, size_t vl) { @@ -186,7 +186,7 @@ vuint16m1_t test_vncvt_x_x_w_u16m1 (vuint32m2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vncvt_x_x_w_u16m2 (vuint32m4_t src, size_t vl) { @@ -195,7 +195,7 @@ vuint16m2_t test_vncvt_x_x_w_u16m2 (vuint32m4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vncvt_x_x_w_u16m4 (vuint32m8_t src, size_t vl) { @@ -204,7 +204,7 @@ vuint16m4_t test_vncvt_x_x_w_u16m4 (vuint32m8_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vncvt_x_x_w_i32mf2 (vint64m1_t src, size_t vl) { @@ -213,7 +213,7 @@ vint32mf2_t test_vncvt_x_x_w_i32mf2 (vint64m1_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vncvt_x_x_w_i32m1 (vint64m2_t src, size_t vl) { @@ -222,7 +222,7 @@ vint32m1_t test_vncvt_x_x_w_i32m1 (vint64m2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vncvt_x_x_w_i32m2 (vint64m4_t src, size_t vl) { @@ -231,7 +231,7 @@ vint32m2_t test_vncvt_x_x_w_i32m2 (vint64m4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vncvt_x_x_w_i32m4 (vint64m8_t src, size_t vl) { @@ -240,7 +240,7 @@ vint32m4_t test_vncvt_x_x_w_i32m4 (vint64m8_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vncvt_x_x_w_u32mf2 (vuint64m1_t src, size_t vl) { @@ -249,7 +249,7 @@ vuint32mf2_t test_vncvt_x_x_w_u32mf2 (vuint64m1_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vncvt_x_x_w_u32m1 (vuint64m2_t src, size_t vl) { @@ -258,7 +258,7 @@ vuint32m1_t test_vncvt_x_x_w_u32m1 (vuint64m2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vncvt_x_x_w_u32m2 (vuint64m4_t src, size_t vl) { @@ -267,7 +267,7 @@ vuint32m2_t test_vncvt_x_x_w_u32m2 (vuint64m4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vncvt_x_x_w_u32m4 (vuint64m8_t src, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vneg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vneg.c index a52e567..d6d3ea9 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vneg.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vneg.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vneg_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vneg_v_i8mf8 (vint8mf8_t op1, size_t vl) { @@ -16,7 +16,7 @@ vint8mf8_t test_vneg_v_i8mf8 (vint8mf8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vneg_v_i8mf4 (vint8mf4_t op1, size_t vl) { @@ -25,7 +25,7 @@ vint8mf4_t test_vneg_v_i8mf4 
(vint8mf4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vneg_v_i8mf2 (vint8mf2_t op1, size_t vl) { @@ -34,7 +34,7 @@ vint8mf2_t test_vneg_v_i8mf2 (vint8mf2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vneg_v_i8m1 (vint8m1_t op1, size_t vl) { @@ -43,7 +43,7 @@ vint8m1_t test_vneg_v_i8m1 (vint8m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vneg_v_i8m2 (vint8m2_t op1, size_t vl) { @@ -52,7 +52,7 @@ vint8m2_t test_vneg_v_i8m2 (vint8m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vneg_v_i8m4 (vint8m4_t op1, size_t vl) { @@ -61,7 +61,7 @@ vint8m4_t test_vneg_v_i8m4 (vint8m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vneg_v_i8m8 (vint8m8_t op1, size_t vl) { @@ -70,7 +70,7 @@ vint8m8_t test_vneg_v_i8m8 (vint8m8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i64( [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vneg_v_i16mf4 (vint16mf4_t op1, size_t vl) { @@ -79,7 +79,7 @@ vint16mf4_t test_vneg_v_i16mf4 (vint16mf4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i64( [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vneg_v_i16mf2 (vint16mf2_t op1, size_t vl) { @@ -88,7 +88,7 @@ vint16mf2_t test_vneg_v_i16mf2 (vint16mf2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i64( [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i64( undef, 
[[OP1:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vneg_v_i16m1 (vint16m1_t op1, size_t vl) { @@ -97,7 +97,7 @@ vint16m1_t test_vneg_v_i16m1 (vint16m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i64( [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vneg_v_i16m2 (vint16m2_t op1, size_t vl) { @@ -106,7 +106,7 @@ vint16m2_t test_vneg_v_i16m2 (vint16m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i64( [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vneg_v_i16m4 (vint16m4_t op1, size_t vl) { @@ -115,7 +115,7 @@ vint16m4_t test_vneg_v_i16m4 (vint16m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i64( [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vneg_v_i16m8 (vint16m8_t op1, size_t vl) { @@ -124,7 +124,7 @@ vint16m8_t test_vneg_v_i16m8 (vint16m8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vneg_v_i32mf2 (vint32mf2_t op1, size_t vl) { @@ -133,7 +133,7 @@ vint32mf2_t test_vneg_v_i32mf2 (vint32mf2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i64( [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vneg_v_i32m1 (vint32m1_t op1, size_t vl) { @@ -142,7 +142,7 @@ vint32m1_t test_vneg_v_i32m1 (vint32m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i64( [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vneg_v_i32m2 (vint32m2_t op1, size_t vl) { @@ -151,7 +151,7 @@ vint32m2_t test_vneg_v_i32m2 (vint32m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i64( [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vneg_v_i32m4 (vint32m4_t op1, size_t vl) { @@ -160,7 +160,7 @@ vint32m4_t test_vneg_v_i32m4 (vint32m4_t op1, size_t vl) { // CHECK-RV64-LABEL: 
@test_vneg_v_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i64( [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vneg_v_i32m8 (vint32m8_t op1, size_t vl) { @@ -169,7 +169,7 @@ vint32m8_t test_vneg_v_i32m8 (vint32m8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i64( [[OP1:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vneg_v_i64m1 (vint64m1_t op1, size_t vl) { @@ -178,7 +178,7 @@ vint64m1_t test_vneg_v_i64m1 (vint64m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i64( [[OP1:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vneg_v_i64m2 (vint64m2_t op1, size_t vl) { @@ -187,7 +187,7 @@ vint64m2_t test_vneg_v_i64m2 (vint64m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i64.i64.i64( [[OP1:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vneg_v_i64m4 (vint64m4_t op1, size_t vl) { @@ -196,7 +196,7 @@ vint64m4_t test_vneg_v_i64m4 (vint64m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i64.i64.i64( [[OP1:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vneg_v_i64m8 (vint64m8_t op1, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnot.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnot.c index 642055f..696d892 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnot.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnot.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnot_v_i8mf8 (vint8mf8_t op1, size_t vl) { @@ -16,7 +16,7 @@ vint8mf8_t test_vnot_v_i8mf8 (vint8mf8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnot_v_i8mf4 (vint8mf4_t op1, size_t vl) { @@ -25,7 +25,7 @@ vint8mf4_t test_vnot_v_i8mf4 (vint8mf4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i8mf2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnot_v_i8mf2 (vint8mf2_t op1, size_t vl) { @@ -34,7 +34,7 @@ vint8mf2_t test_vnot_v_i8mf2 (vint8mf2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnot_v_i8m1 (vint8m1_t op1, size_t vl) { @@ -43,7 +43,7 @@ vint8m1_t test_vnot_v_i8m1 (vint8m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnot_v_i8m2 (vint8m2_t op1, size_t vl) { @@ -52,7 +52,7 @@ vint8m2_t test_vnot_v_i8m2 (vint8m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnot_v_i8m4 (vint8m4_t op1, size_t vl) { @@ -61,7 +61,7 @@ vint8m4_t test_vnot_v_i8m4 (vint8m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnot_v_i8m8 (vint8m8_t op1, size_t vl) { @@ -70,7 +70,7 @@ vint8m8_t test_vnot_v_i8m8 (vint8m8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnot_v_i16mf4 (vint16mf4_t op1, size_t vl) { @@ -79,7 +79,7 @@ vint16mf4_t test_vnot_v_i16mf4 (vint16mf4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnot_v_i16mf2 (vint16mf2_t op1, size_t vl) { @@ -88,7 +88,7 @@ vint16mf2_t test_vnot_v_i16mf2 (vint16mf2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint16m1_t test_vnot_v_i16m1 (vint16m1_t op1, size_t vl) { @@ -97,7 +97,7 @@ vint16m1_t test_vnot_v_i16m1 (vint16m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnot_v_i16m2 (vint16m2_t op1, size_t vl) { @@ -106,7 +106,7 @@ vint16m2_t test_vnot_v_i16m2 (vint16m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnot_v_i16m4 (vint16m4_t op1, size_t vl) { @@ -115,7 +115,7 @@ vint16m4_t test_vnot_v_i16m4 (vint16m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnot_v_i16m8 (vint16m8_t op1, size_t vl) { @@ -124,7 +124,7 @@ vint16m8_t test_vnot_v_i16m8 (vint16m8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnot_v_i32mf2 (vint32mf2_t op1, size_t vl) { @@ -133,7 +133,7 @@ vint32mf2_t test_vnot_v_i32mf2 (vint32mf2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnot_v_i32m1 (vint32m1_t op1, size_t vl) { @@ -142,7 +142,7 @@ vint32m1_t test_vnot_v_i32m1 (vint32m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnot_v_i32m2 (vint32m2_t op1, size_t vl) { @@ -151,7 +151,7 @@ vint32m2_t test_vnot_v_i32m2 (vint32m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnot_v_i32m4 (vint32m4_t op1, size_t vl) { @@ -160,7 +160,7 @@ vint32m4_t test_vnot_v_i32m4 (vint32m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnot_v_i32m8 (vint32m8_t op1, size_t vl) { @@ -169,7 +169,7 @@ vint32m8_t test_vnot_v_i32m8 (vint32m8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnot_v_i64m1 (vint64m1_t op1, size_t vl) { @@ -178,7 +178,7 @@ vint64m1_t test_vnot_v_i64m1 (vint64m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnot_v_i64m2 (vint64m2_t op1, size_t vl) { @@ -187,7 +187,7 @@ vint64m2_t test_vnot_v_i64m2 (vint64m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnot_v_i64m4 (vint64m4_t op1, size_t vl) { @@ -196,7 +196,7 @@ vint64m4_t test_vnot_v_i64m4 (vint64m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnot_v_i64m8 (vint64m8_t op1, size_t vl) { @@ -205,7 +205,7 @@ vint64m8_t test_vnot_v_i64m8 (vint64m8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnot_v_u8mf8 (vuint8mf8_t op1, size_t vl) { @@ -214,7 +214,7 @@ vuint8mf8_t test_vnot_v_u8mf8 (vuint8mf8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnot_v_u8mf4 (vuint8mf4_t op1, size_t vl) { @@ -223,7 +223,7 @@ vuint8mf4_t test_vnot_v_u8mf4 (vuint8mf4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint8mf2_t test_vnot_v_u8mf2 (vuint8mf2_t op1, size_t vl) { @@ -232,7 +232,7 @@ vuint8mf2_t test_vnot_v_u8mf2 (vuint8mf2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnot_v_u8m1 (vuint8m1_t op1, size_t vl) { @@ -241,7 +241,7 @@ vuint8m1_t test_vnot_v_u8m1 (vuint8m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnot_v_u8m2 (vuint8m2_t op1, size_t vl) { @@ -250,7 +250,7 @@ vuint8m2_t test_vnot_v_u8m2 (vuint8m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnot_v_u8m4 (vuint8m4_t op1, size_t vl) { @@ -259,7 +259,7 @@ vuint8m4_t test_vnot_v_u8m4 (vuint8m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnot_v_u8m8 (vuint8m8_t op1, size_t vl) { @@ -268,7 +268,7 @@ vuint8m8_t test_vnot_v_u8m8 (vuint8m8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnot_v_u16mf4 (vuint16mf4_t op1, size_t vl) { @@ -277,7 +277,7 @@ vuint16mf4_t test_vnot_v_u16mf4 (vuint16mf4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnot_v_u16mf2 (vuint16mf2_t op1, size_t vl) { @@ -286,7 +286,7 @@ vuint16mf2_t test_vnot_v_u16mf2 (vuint16mf2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnot_v_u16m1 (vuint16m1_t op1, size_t vl) { @@ -295,7 +295,7 @@ vuint16m1_t test_vnot_v_u16m1 (vuint16m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vxor.nxv8i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnot_v_u16m2 (vuint16m2_t op1, size_t vl) { @@ -304,7 +304,7 @@ vuint16m2_t test_vnot_v_u16m2 (vuint16m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnot_v_u16m4 (vuint16m4_t op1, size_t vl) { @@ -313,7 +313,7 @@ vuint16m4_t test_vnot_v_u16m4 (vuint16m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnot_v_u16m8 (vuint16m8_t op1, size_t vl) { @@ -322,7 +322,7 @@ vuint16m8_t test_vnot_v_u16m8 (vuint16m8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnot_v_u32mf2 (vuint32mf2_t op1, size_t vl) { @@ -331,7 +331,7 @@ vuint32mf2_t test_vnot_v_u32mf2 (vuint32mf2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnot_v_u32m1 (vuint32m1_t op1, size_t vl) { @@ -340,7 +340,7 @@ vuint32m1_t test_vnot_v_u32m1 (vuint32m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnot_v_u32m2 (vuint32m2_t op1, size_t vl) { @@ -349,7 +349,7 @@ vuint32m2_t test_vnot_v_u32m2 (vuint32m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnot_v_u32m4 (vuint32m4_t op1, size_t vl) { @@ -358,7 +358,7 @@ vuint32m4_t test_vnot_v_u32m4 (vuint32m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnot_v_u32m8 (vuint32m8_t op1, size_t vl) { @@ -367,7 +367,7 @@ vuint32m8_t test_vnot_v_u32m8 (vuint32m8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnot_v_u64m1 (vuint64m1_t op1, size_t vl) { @@ -376,7 +376,7 @@ vuint64m1_t test_vnot_v_u64m1 (vuint64m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnot_v_u64m2 (vuint64m2_t op1, size_t vl) { @@ -385,7 +385,7 @@ vuint64m2_t test_vnot_v_u64m2 (vuint64m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnot_v_u64m4 (vuint64m4_t op1, size_t vl) { @@ -394,7 +394,7 @@ vuint64m4_t test_vnot_v_u64m4 (vuint64m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnot_v_u64m8 (vuint64m8_t op1, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsra.c index c911a40..f580d8c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsra.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsra.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnsra_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vnsra_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i8.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i8.nxv1i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnsra_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) { @@ -24,7 +24,7 @@ vint8mf8_t test_vnsra_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnsra.nxv2i8.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i8.nxv2i16.nxv2i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnsra_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { @@ -33,7 +33,7 @@ vint8mf4_t test_vnsra_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i8.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i8.nxv2i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnsra_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) { @@ -42,7 +42,7 @@ vint8mf4_t test_vnsra_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i8.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i8.nxv4i16.nxv4i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnsra_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) { @@ -51,7 +51,7 @@ vint8mf2_t test_vnsra_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i8.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i8.nxv4i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnsra_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vnsra_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i8.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i8.nxv8i16.nxv8i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnsra_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vnsra_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i8.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i8.nxv8i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnsra_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vnsra_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t 
test_vnsra_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vnsra_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i8.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i8.nxv16i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnsra_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vnsra_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnsra_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vnsra_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv32i8.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv32i8.nxv32i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnsra_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vnsra_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i16.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i16.nxv1i32.nxv1i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnsra_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { @@ -123,7 +123,7 @@ vint16mf4_t test_vnsra_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, size_t vl) // CHECK-RV64-LABEL: @test_vnsra_wx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i16.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i16.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnsra_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) { @@ -132,7 +132,7 @@ vint16mf4_t test_vnsra_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i16.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i16.nxv2i32.nxv2i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnsra_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, size_t vl) { @@ -141,7 +141,7 @@ vint16mf2_t test_vnsra_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, size_t vl) // CHECK-RV64-LABEL: @test_vnsra_wx_i16mf2( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i16.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i16.nxv2i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnsra_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) { @@ -150,7 +150,7 @@ vint16mf2_t test_vnsra_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnsra_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) { @@ -159,7 +159,7 @@ vint16m1_t test_vnsra_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i16.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i16.nxv4i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnsra_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) { @@ -168,7 +168,7 @@ vint16m1_t test_vnsra_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnsra_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) { @@ -177,7 +177,7 @@ vint16m2_t test_vnsra_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i16.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i16.nxv8i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnsra_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) { @@ -186,7 +186,7 @@ vint16m2_t test_vnsra_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnsra_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) { @@ -195,7 +195,7 @@ vint16m4_t test_vnsra_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i16.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i16.nxv16i32.i64.i64( undef, [[OP1:%.*]], i64 
[[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnsra_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) { @@ -204,7 +204,7 @@ vint16m4_t test_vnsra_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnsra_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, size_t vl) { @@ -213,7 +213,7 @@ vint32mf2_t test_vnsra_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, size_t vl) // CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i32.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i32.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnsra_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) { @@ -222,7 +222,7 @@ vint32mf2_t test_vnsra_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnsra_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) { @@ -231,7 +231,7 @@ vint32m1_t test_vnsra_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i32.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i32.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnsra_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) { @@ -240,7 +240,7 @@ vint32m1_t test_vnsra_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnsra_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) { @@ -249,7 +249,7 @@ vint32m2_t test_vnsra_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i32.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i32.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnsra_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) { @@ -258,7 +258,7 @@ vint32m2_t test_vnsra_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) { // 
CHECK-RV64-LABEL: @test_vnsra_wv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnsra_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) { @@ -267,7 +267,7 @@ vint32m4_t test_vnsra_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i32.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i32.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnsra_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsrl.c index 315a680..7a1b461 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsrl.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsrl.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnsrl_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) { @@ -15,7 +15,7 @@ vuint8mf8_t test_vnsrl_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnsrl_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) { @@ -24,7 +24,7 @@ vuint8mf8_t test_vnsrl_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.nxv2i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnsrl_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { @@ -33,7 +33,7 @@ vuint8mf4_t test_vnsrl_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnsrl_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) { @@ -42,7 +42,7 @@ vuint8mf4_t test_vnsrl_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf2( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.nxv4i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnsrl_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { @@ -51,7 +51,7 @@ vuint8mf2_t test_vnsrl_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnsrl_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) { @@ -60,7 +60,7 @@ vuint8mf2_t test_vnsrl_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnsrl_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) { @@ -69,7 +69,7 @@ vuint8m1_t test_vnsrl_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnsrl_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) { @@ -78,7 +78,7 @@ vuint8m1_t test_vnsrl_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnsrl_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) { @@ -87,7 +87,7 @@ vuint8m2_t test_vnsrl_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnsrl_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) { @@ -96,7 +96,7 @@ vuint8m2_t test_vnsrl_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8.i64( undef, [[OP1:%.*]], 
[[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnsrl_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) { @@ -105,7 +105,7 @@ vuint8m4_t test_vnsrl_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnsrl_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) { @@ -114,7 +114,7 @@ vuint8m4_t test_vnsrl_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.nxv1i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnsrl_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { @@ -123,7 +123,7 @@ vuint16mf4_t test_vnsrl_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, size_t v // CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnsrl_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) { @@ -132,7 +132,7 @@ vuint16mf4_t test_vnsrl_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.nxv2i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnsrl_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { @@ -141,7 +141,7 @@ vuint16mf2_t test_vnsrl_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, size_t vl // CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnsrl_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) { @@ -150,7 +150,7 @@ vuint16mf2_t test_vnsrl_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnsrl_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift, size_t vl) { @@ -159,7 +159,7 @@ vuint16m1_t test_vnsrl_wv_u16m1(vuint32m2_t op1, 
vuint16m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnsrl_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) { @@ -168,7 +168,7 @@ vuint16m1_t test_vnsrl_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnsrl_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, size_t vl) { @@ -177,7 +177,7 @@ vuint16m2_t test_vnsrl_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnsrl_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) { @@ -186,7 +186,7 @@ vuint16m2_t test_vnsrl_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnsrl_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, size_t vl) { @@ -195,7 +195,7 @@ vuint16m4_t test_vnsrl_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnsrl_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) { @@ -204,7 +204,7 @@ vuint16m4_t test_vnsrl_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnsrl_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { @@ -213,7 +213,7 @@ vuint32mf2_t test_vnsrl_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, size_t vl // CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64( [[OP1:%.*]], i64 
[[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnsrl_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) { @@ -222,7 +222,7 @@ vuint32mf2_t test_vnsrl_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnsrl_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, size_t vl) { @@ -231,7 +231,7 @@ vuint32m1_t test_vnsrl_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnsrl_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) { @@ -240,7 +240,7 @@ vuint32m1_t test_vnsrl_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnsrl_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift, size_t vl) { @@ -249,7 +249,7 @@ vuint32m2_t test_vnsrl_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnsrl_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) { @@ -258,7 +258,7 @@ vuint32m2_t test_vnsrl_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnsrl_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift, size_t vl) { @@ -267,7 +267,7 @@ vuint32m4_t test_vnsrl_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t 
test_vnsrl_wx_u32m4(vuint64m8_t op1, size_t shift, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c index 526f78a..44349ce 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ vint8mf8_t test_vor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ vint8mf4_t test_vor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ vint8mf4_t test_vor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ vint8mf2_t test_vor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vor.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint8m8_t test_vor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vor.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint8m8_t test_vor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint16mf4_t test_vor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4_t test_vor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint16mf2_t test_vor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint16mf2_t test_vor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint16m1_t test_vor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint16m1_t test_vor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: 
@test_vor_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint16m2_t test_vor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint16m2_t test_vor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint16m4_t test_vor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint16m4_t test_vor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint16m8_t test_vor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint16m8_t test_vor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t 
test_vor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ vint32m2_t test_vor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ vint32m2_t test_vor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ vint32m4_t test_vor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t 
test_vor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ vint32m4_t test_vor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ vint32m8_t test_vor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ vint32m8_t test_vor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ vint64m1_t test_vor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ vint64m1_t test_vor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ vint64m2_t test_vor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ vint64m2_t test_vor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ vint64m4_t test_vor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ vint64m4_t test_vor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ vint64m8_t test_vor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ vint64m8_t test_vor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ vuint8mf8_t test_vor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ vuint8mf8_t test_vor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ vuint8mf4_t test_vor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vor.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ vuint8mf4_t test_vor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ vuint8mf2_t test_vor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ vuint8mf2_t test_vor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ vuint8m1_t test_vor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ vuint8m1_t test_vor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ vuint8m2_t test_vor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ vuint8m2_t test_vor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ vuint8m4_t test_vor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ vuint8m4_t test_vor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ vuint8m8_t test_vor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ vuint8m8_t test_vor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -537,7 +537,7 @@ vuint16mf4_t test_vor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -546,7 +546,7 @@ vuint16mf4_t test_vor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -555,7 +555,7 @@ vuint16mf2_t test_vor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vor.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -564,7 +564,7 @@ vuint16mf2_t test_vor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -573,7 +573,7 @@ vuint16m1_t test_vor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -582,7 +582,7 @@ vuint16m1_t test_vor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -591,7 +591,7 @@ vuint16m2_t test_vor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -600,7 +600,7 @@ vuint16m2_t test_vor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -609,7 +609,7 @@ vuint16m4_t test_vor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -618,7 +618,7 @@ vuint16m4_t test_vor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t 
vl) { // CHECK-RV64-LABEL: @test_vor_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -627,7 +627,7 @@ vuint16m8_t test_vor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -636,7 +636,7 @@ vuint16m8_t test_vor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -645,7 +645,7 @@ vuint32mf2_t test_vor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -654,7 +654,7 @@ vuint32mf2_t test_vor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -663,7 +663,7 @@ vuint32m1_t test_vor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -672,7 +672,7 @@ vuint32m1_t test_vor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vor_vv_u32m2(vuint32m2_t 
op1, vuint32m2_t op2, size_t vl) { @@ -681,7 +681,7 @@ vuint32m2_t test_vor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2_t test_vor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -699,7 +699,7 @@ vuint32m4_t test_vor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -708,7 +708,7 @@ vuint32m4_t test_vor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -717,7 +717,7 @@ vuint32m8_t test_vor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -726,7 +726,7 @@ vuint32m8_t test_vor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -735,7 +735,7 @@ vuint64m1_t test_vor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.i64.i64( undef, [[OP1:%.*]], 
i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -744,7 +744,7 @@ vuint64m1_t test_vor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -753,7 +753,7 @@ vuint64m2_t test_vor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -762,7 +762,7 @@ vuint64m2_t test_vor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -771,7 +771,7 @@ vuint64m4_t test_vor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -780,7 +780,7 @@ vuint64m4_t test_vor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -789,7 +789,7 @@ vuint64m8_t test_vor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c index ddff28e..e8041d4d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vrem_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrem_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vrem_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrem_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ vint8mf8_t test_vrem_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrem_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ vint8mf4_t test_vrem_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrem_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ vint8mf4_t test_vrem_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrem_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ vint8mf2_t test_vrem_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrem_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vrem_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrem_vv_i8m1(vint8m1_t op1, 
vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vrem_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrem_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vrem_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrem_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vrem_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrem_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vrem_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrem_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vrem_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrem_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vrem_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrem_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint8m8_t test_vrem_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t 
test_vrem_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint8m8_t test_vrem_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrem_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint16mf4_t test_vrem_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrem_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4_t test_vrem_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrem_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint16mf2_t test_vrem_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrem_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint16mf2_t test_vrem_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrem_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint16m1_t test_vrem_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrem_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint16m1_t test_vrem_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrem.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrem_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint16m2_t test_vrem_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrem_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint16m2_t test_vrem_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrem_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint16m4_t test_vrem_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrem_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint16m4_t test_vrem_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrem_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint16m8_t test_vrem_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrem_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint16m8_t test_vrem_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrem_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vrem_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrem.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrem_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vrem_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrem_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vrem_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrem_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vrem_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrem_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ vint32m2_t test_vrem_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrem_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ vint32m2_t test_vrem_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrem_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ vint32m4_t test_vrem_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrem_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ vint32m4_t test_vrem_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // 
CHECK-RV64-LABEL: @test_vrem_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrem_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ vint32m8_t test_vrem_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrem_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ vint32m8_t test_vrem_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrem_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ vint64m1_t test_vrem_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrem_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ vint64m1_t test_vrem_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrem_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ vint64m2_t test_vrem_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrem_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ vint64m2_t test_vrem_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrem_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t 
vl) { @@ -375,7 +375,7 @@ vint64m4_t test_vrem_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrem_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ vint64m4_t test_vrem_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrem_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ vint64m8_t test_vrem_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrem_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ vint64m8_t test_vrem_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vremu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ vuint8mf8_t test_vremu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vremu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ vuint8mf8_t test_vremu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vremu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ vuint8mf4_t test_vremu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vremu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ vuint8mf4_t test_vremu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vremu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ vuint8mf2_t test_vremu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vremu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ vuint8mf2_t test_vremu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vremu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ vuint8m1_t test_vremu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vremu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ vuint8m1_t test_vremu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vremu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ vuint8m2_t test_vremu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vremu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ vuint8m2_t test_vremu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vremu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ vuint8m4_t test_vremu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vremu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ vuint8m4_t test_vremu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vremu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ vuint8m8_t test_vremu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vremu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ vuint8m8_t test_vremu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vremu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -537,7 +537,7 @@ vuint16mf4_t test_vremu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vremu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vremu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -546,7 +546,7 @@ vuint16mf4_t test_vremu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vremu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -555,7 +555,7 @@ vuint16mf2_t test_vremu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vremu_vx_u16mf2( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vremu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -564,7 +564,7 @@ vuint16mf2_t test_vremu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vremu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -573,7 +573,7 @@ vuint16m1_t test_vremu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vremu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -582,7 +582,7 @@ vuint16m1_t test_vremu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vremu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -591,7 +591,7 @@ vuint16m2_t test_vremu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vremu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -600,7 +600,7 @@ vuint16m2_t test_vremu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vremu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -609,7 +609,7 @@ vuint16m4_t test_vremu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t 
test_vremu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -618,7 +618,7 @@ vuint16m4_t test_vremu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vremu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -627,7 +627,7 @@ vuint16m8_t test_vremu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vremu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -636,7 +636,7 @@ vuint16m8_t test_vremu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vremu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -645,7 +645,7 @@ vuint32mf2_t test_vremu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vremu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vremu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -654,7 +654,7 @@ vuint32mf2_t test_vremu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vremu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -663,7 +663,7 @@ vuint32m1_t test_vremu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vremu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -672,7 +672,7 @@ vuint32m1_t test_vremu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vremu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -681,7 +681,7 @@ vuint32m2_t test_vremu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vremu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2_t test_vremu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vremu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -699,7 +699,7 @@ vuint32m4_t test_vremu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vremu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -708,7 +708,7 @@ vuint32m4_t test_vremu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vremu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -717,7 +717,7 @@ vuint32m8_t test_vremu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vremu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -726,7 +726,7 @@ vuint32m8_t test_vremu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vremu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -735,7 +735,7 @@ vuint64m1_t test_vremu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { 
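To make the shape of these regenerated checks concrete, here is a small standalone sketch (not part of the patch; the function name and the -march=rv64gcv -O2 flags are assumed) of how an unmasked vrem builtin lowers after this change: the C-level signature is unchanged, and the frontend simply passes undef as the new leading passthru operand, which the backend later treats as tail agnostic.

#include <riscv_vector.h>

// Hypothetical example, not taken from the patch's test files.
// After this change the call below is expected to lower to:
//   %0 = call <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.nxv2i32.i64(
//            <vscale x 2 x i32> undef, <vscale x 2 x i32> %op1,
//            <vscale x 2 x i32> %op2, i64 %vl)
// i.e. the same intrinsic as before with an undef passthru prepended.
vint32m1_t remainder_example(vint32m1_t op1, vint32m1_t op2, size_t vl) {
  return vrem_vv_i32m1(op1, op2, vl);
}
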
// CHECK-RV64-LABEL: @test_vremu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vremu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -744,7 +744,7 @@ vuint64m1_t test_vremu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vremu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -753,7 +753,7 @@ vuint64m2_t test_vremu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vremu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -762,7 +762,7 @@ vuint64m2_t test_vremu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vremu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -771,7 +771,7 @@ vuint64m4_t test_vremu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vremu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -780,7 +780,7 @@ vuint64m4_t test_vremu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vremu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -789,7 +789,7 @@ vuint64m8_t test_vremu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint64m8_t test_vremu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrgather.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrgather.c index 7e4ef5e..1a2ddb8 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrgather.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrgather.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrgather_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t index, @@ -17,7 +17,7 @@ vint8mf8_t test_vrgather_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrgather_vx_i8mf8(vint8mf8_t op1, size_t index, size_t vl) { @@ -26,7 +26,7 @@ vint8mf8_t test_vrgather_vx_i8mf8(vint8mf8_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrgather_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t index, @@ -36,7 +36,7 @@ vint8mf4_t test_vrgather_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrgather_vx_i8mf4(vint8mf4_t op1, size_t index, size_t vl) { @@ -45,7 +45,7 @@ vint8mf4_t test_vrgather_vx_i8mf4(vint8mf4_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrgather_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t index, @@ -55,7 +55,7 @@ vint8mf2_t test_vrgather_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrgather_vx_i8mf2(vint8mf2_t op1, size_t index, size_t vl) { @@ -64,7 +64,7 @@ vint8mf2_t test_vrgather_vx_i8mf2(vint8mf2_t op1, size_t index, size_t vl) { 
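The vrgather updates that begin above follow the same pattern for both the vector-index (vrgather.vv) and scalar-index (vrgather.vx) forms: only the IR call gains a leading undef passthru. A minimal sketch of the vx case for reference (hypothetical function name, assumed rv64gcv target, not part of the patch):

#include <riscv_vector.h>

// Hypothetical example; assumes -march=rv64gcv.
// Expected lowering after this patch:
//   %0 = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i64(
//            <vscale x 8 x i8> undef, <vscale x 8 x i8> %op1,
//            i64 %index, i64 %vl)
// Each active element of the result is op1[index]; the undef passthru
// marks the tail as agnostic since the builtin provides no merge value.
vint8m1_t gather_index_example(vint8m1_t op1, size_t index, size_t vl) {
  return vrgather_vx_i8m1(op1, index, vl);
}
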
// CHECK-RV64-LABEL: @test_vrgather_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrgather_vv_i8m1(vint8m1_t op1, vuint8m1_t index, size_t vl) { @@ -73,7 +73,7 @@ vint8m1_t test_vrgather_vv_i8m1(vint8m1_t op1, vuint8m1_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrgather_vx_i8m1(vint8m1_t op1, size_t index, size_t vl) { @@ -82,7 +82,7 @@ vint8m1_t test_vrgather_vx_i8m1(vint8m1_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrgather_vv_i8m2(vint8m2_t op1, vuint8m2_t index, size_t vl) { @@ -91,7 +91,7 @@ vint8m2_t test_vrgather_vv_i8m2(vint8m2_t op1, vuint8m2_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrgather_vx_i8m2(vint8m2_t op1, size_t index, size_t vl) { @@ -100,7 +100,7 @@ vint8m2_t test_vrgather_vx_i8m2(vint8m2_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrgather_vv_i8m4(vint8m4_t op1, vuint8m4_t index, size_t vl) { @@ -109,7 +109,7 @@ vint8m4_t test_vrgather_vv_i8m4(vint8m4_t op1, vuint8m4_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrgather_vx_i8m4(vint8m4_t op1, size_t index, size_t vl) { @@ -118,7 +118,7 @@ vint8m4_t test_vrgather_vx_i8m4(vint8m4_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv64i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv64i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint8m8_t test_vrgather_vv_i8m8(vint8m8_t op1, vuint8m8_t index, size_t vl) { @@ -127,7 +127,7 @@ vint8m8_t test_vrgather_vv_i8m8(vint8m8_t op1, vuint8m8_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv64i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv64i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrgather_vx_i8m8(vint8m8_t op1, size_t index, size_t vl) { @@ -136,7 +136,7 @@ vint8m8_t test_vrgather_vx_i8m8(vint8m8_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrgather_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t index, @@ -146,7 +146,7 @@ vint16mf4_t test_vrgather_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrgather_vx_i16mf4(vint16mf4_t op1, size_t index, size_t vl) { @@ -155,7 +155,7 @@ vint16mf4_t test_vrgather_vx_i16mf4(vint16mf4_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrgather_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t index, @@ -165,7 +165,7 @@ vint16mf2_t test_vrgather_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrgather_vx_i16mf2(vint16mf2_t op1, size_t index, size_t vl) { @@ -174,7 +174,7 @@ vint16mf2_t test_vrgather_vx_i16mf2(vint16mf2_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrgather_vv_i16m1(vint16m1_t op1, vuint16m1_t index, @@ -184,7 +184,7 @@ vint16m1_t test_vrgather_vv_i16m1(vint16m1_t op1, vuint16m1_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i16.i64( 
[[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrgather_vx_i16m1(vint16m1_t op1, size_t index, size_t vl) { @@ -193,7 +193,7 @@ vint16m1_t test_vrgather_vx_i16m1(vint16m1_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrgather_vv_i16m2(vint16m2_t op1, vuint16m2_t index, @@ -203,7 +203,7 @@ vint16m2_t test_vrgather_vv_i16m2(vint16m2_t op1, vuint16m2_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrgather_vx_i16m2(vint16m2_t op1, size_t index, size_t vl) { @@ -212,7 +212,7 @@ vint16m2_t test_vrgather_vx_i16m2(vint16m2_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrgather_vv_i16m4(vint16m4_t op1, vuint16m4_t index, @@ -222,7 +222,7 @@ vint16m4_t test_vrgather_vv_i16m4(vint16m4_t op1, vuint16m4_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrgather_vx_i16m4(vint16m4_t op1, size_t index, size_t vl) { @@ -231,7 +231,7 @@ vint16m4_t test_vrgather_vx_i16m4(vint16m4_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrgather_vv_i16m8(vint16m8_t op1, vuint16m8_t index, @@ -241,7 +241,7 @@ vint16m8_t test_vrgather_vv_i16m8(vint16m8_t op1, vuint16m8_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrgather_vx_i16m8(vint16m8_t op1, size_t index, size_t vl) { @@ -250,7 +250,7 @@ vint16m8_t test_vrgather_vx_i16m8(vint16m8_t op1, 
size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrgather_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t index, @@ -260,7 +260,7 @@ vint32mf2_t test_vrgather_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrgather_vx_i32mf2(vint32mf2_t op1, size_t index, size_t vl) { @@ -269,7 +269,7 @@ vint32mf2_t test_vrgather_vx_i32mf2(vint32mf2_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrgather_vv_i32m1(vint32m1_t op1, vuint32m1_t index, @@ -279,7 +279,7 @@ vint32m1_t test_vrgather_vv_i32m1(vint32m1_t op1, vuint32m1_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrgather_vx_i32m1(vint32m1_t op1, size_t index, size_t vl) { @@ -288,7 +288,7 @@ vint32m1_t test_vrgather_vx_i32m1(vint32m1_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrgather_vv_i32m2(vint32m2_t op1, vuint32m2_t index, @@ -298,7 +298,7 @@ vint32m2_t test_vrgather_vv_i32m2(vint32m2_t op1, vuint32m2_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrgather_vx_i32m2(vint32m2_t op1, size_t index, size_t vl) { @@ -307,7 +307,7 @@ vint32m2_t test_vrgather_vx_i32m2(vint32m2_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrgather_vv_i32m4(vint32m4_t op1, vuint32m4_t index, @@ -317,7 +317,7 @@ vint32m4_t test_vrgather_vv_i32m4(vint32m4_t op1, vuint32m4_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrgather_vx_i32m4(vint32m4_t op1, size_t index, size_t vl) { @@ -326,7 +326,7 @@ vint32m4_t test_vrgather_vx_i32m4(vint32m4_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrgather_vv_i32m8(vint32m8_t op1, vuint32m8_t index, @@ -336,7 +336,7 @@ vint32m8_t test_vrgather_vv_i32m8(vint32m8_t op1, vuint32m8_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrgather_vx_i32m8(vint32m8_t op1, size_t index, size_t vl) { @@ -345,7 +345,7 @@ vint32m8_t test_vrgather_vx_i32m8(vint32m8_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrgather_vv_i64m1(vint64m1_t op1, vuint64m1_t index, @@ -355,7 +355,7 @@ vint64m1_t test_vrgather_vv_i64m1(vint64m1_t op1, vuint64m1_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrgather_vx_i64m1(vint64m1_t op1, size_t index, size_t vl) { @@ -364,7 +364,7 @@ vint64m1_t test_vrgather_vx_i64m1(vint64m1_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrgather_vv_i64m2(vint64m2_t op1, vuint64m2_t index, @@ -374,7 +374,7 @@ vint64m2_t test_vrgather_vv_i64m2(vint64m2_t op1, vuint64m2_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i64.i64( [[OP1:%.*]], i64 
[[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrgather_vx_i64m2(vint64m2_t op1, size_t index, size_t vl) { @@ -383,7 +383,7 @@ vint64m2_t test_vrgather_vx_i64m2(vint64m2_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrgather_vv_i64m4(vint64m4_t op1, vuint64m4_t index, @@ -393,7 +393,7 @@ vint64m4_t test_vrgather_vv_i64m4(vint64m4_t op1, vuint64m4_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrgather_vx_i64m4(vint64m4_t op1, size_t index, size_t vl) { @@ -402,7 +402,7 @@ vint64m4_t test_vrgather_vx_i64m4(vint64m4_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrgather_vv_i64m8(vint64m8_t op1, vuint64m8_t index, @@ -412,7 +412,7 @@ vint64m8_t test_vrgather_vv_i64m8(vint64m8_t op1, vuint64m8_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrgather_vx_i64m8(vint64m8_t op1, size_t index, size_t vl) { @@ -421,7 +421,7 @@ vint64m8_t test_vrgather_vx_i64m8(vint64m8_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrgather_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t index, @@ -431,7 +431,7 @@ vuint8mf8_t test_vrgather_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrgather_vx_u8mf8(vuint8mf8_t op1, size_t index, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8_t test_vrgather_vx_u8mf8(vuint8mf8_t op1, size_t index, size_t 
vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrgather_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t index, @@ -450,7 +450,7 @@ vuint8mf4_t test_vrgather_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrgather_vx_u8mf4(vuint8mf4_t op1, size_t index, size_t vl) { @@ -459,7 +459,7 @@ vuint8mf4_t test_vrgather_vx_u8mf4(vuint8mf4_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrgather_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t index, @@ -469,7 +469,7 @@ vuint8mf2_t test_vrgather_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrgather_vx_u8mf2(vuint8mf2_t op1, size_t index, size_t vl) { @@ -478,7 +478,7 @@ vuint8mf2_t test_vrgather_vx_u8mf2(vuint8mf2_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vrgather_vv_u8m1(vuint8m1_t op1, vuint8m1_t index, size_t vl) { @@ -487,7 +487,7 @@ vuint8m1_t test_vrgather_vv_u8m1(vuint8m1_t op1, vuint8m1_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vrgather_vx_u8m1(vuint8m1_t op1, size_t index, size_t vl) { @@ -496,7 +496,7 @@ vuint8m1_t test_vrgather_vx_u8m1(vuint8m1_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint8m2_t test_vrgather_vv_u8m2(vuint8m2_t op1, vuint8m2_t index, size_t vl) { @@ -505,7 +505,7 @@ vuint8m2_t test_vrgather_vv_u8m2(vuint8m2_t op1, vuint8m2_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vrgather_vx_u8m2(vuint8m2_t op1, size_t index, size_t vl) { @@ -514,7 +514,7 @@ vuint8m2_t test_vrgather_vx_u8m2(vuint8m2_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrgather_vv_u8m4(vuint8m4_t op1, vuint8m4_t index, size_t vl) { @@ -523,7 +523,7 @@ vuint8m4_t test_vrgather_vv_u8m4(vuint8m4_t op1, vuint8m4_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrgather_vx_u8m4(vuint8m4_t op1, size_t index, size_t vl) { @@ -532,7 +532,7 @@ vuint8m4_t test_vrgather_vx_u8m4(vuint8m4_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv64i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv64i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vrgather_vv_u8m8(vuint8m8_t op1, vuint8m8_t index, size_t vl) { @@ -541,7 +541,7 @@ vuint8m8_t test_vrgather_vv_u8m8(vuint8m8_t op1, vuint8m8_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv64i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv64i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vrgather_vx_u8m8(vuint8m8_t op1, size_t index, size_t vl) { @@ -550,7 +550,7 @@ vuint8m8_t test_vrgather_vx_u8m8(vuint8m8_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrgather_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t index, @@ -560,7 +560,7 @@ vuint16mf4_t test_vrgather_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrgather.vx.nxv1i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrgather_vx_u16mf4(vuint16mf4_t op1, size_t index, @@ -570,7 +570,7 @@ vuint16mf4_t test_vrgather_vx_u16mf4(vuint16mf4_t op1, size_t index, // CHECK-RV64-LABEL: @test_vrgather_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrgather_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t index, @@ -580,7 +580,7 @@ vuint16mf2_t test_vrgather_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrgather_vx_u16mf2(vuint16mf2_t op1, size_t index, @@ -590,7 +590,7 @@ vuint16mf2_t test_vrgather_vx_u16mf2(vuint16mf2_t op1, size_t index, // CHECK-RV64-LABEL: @test_vrgather_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrgather_vv_u16m1(vuint16m1_t op1, vuint16m1_t index, @@ -600,7 +600,7 @@ vuint16m1_t test_vrgather_vv_u16m1(vuint16m1_t op1, vuint16m1_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrgather_vx_u16m1(vuint16m1_t op1, size_t index, size_t vl) { @@ -609,7 +609,7 @@ vuint16m1_t test_vrgather_vx_u16m1(vuint16m1_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrgather_vv_u16m2(vuint16m2_t op1, vuint16m2_t index, @@ -619,7 +619,7 @@ vuint16m2_t test_vrgather_vv_u16m2(vuint16m2_t op1, vuint16m2_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrgather_vx_u16m2(vuint16m2_t op1, size_t index, size_t vl) { @@ -628,7 +628,7 @@ vuint16m2_t 
test_vrgather_vx_u16m2(vuint16m2_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrgather_vv_u16m4(vuint16m4_t op1, vuint16m4_t index, @@ -638,7 +638,7 @@ vuint16m4_t test_vrgather_vv_u16m4(vuint16m4_t op1, vuint16m4_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrgather_vx_u16m4(vuint16m4_t op1, size_t index, size_t vl) { @@ -647,7 +647,7 @@ vuint16m4_t test_vrgather_vx_u16m4(vuint16m4_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vrgather_vv_u16m8(vuint16m8_t op1, vuint16m8_t index, @@ -657,7 +657,7 @@ vuint16m8_t test_vrgather_vv_u16m8(vuint16m8_t op1, vuint16m8_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vrgather_vx_u16m8(vuint16m8_t op1, size_t index, size_t vl) { @@ -666,7 +666,7 @@ vuint16m8_t test_vrgather_vx_u16m8(vuint16m8_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vrgather_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t index, @@ -676,7 +676,7 @@ vuint32mf2_t test_vrgather_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vrgather_vx_u32mf2(vuint32mf2_t op1, size_t index, @@ -686,7 +686,7 @@ vuint32mf2_t test_vrgather_vx_u32mf2(vuint32mf2_t op1, size_t index, // CHECK-RV64-LABEL: @test_vrgather_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i32.i64( undef, 
[[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrgather_vv_u32m1(vuint32m1_t op1, vuint32m1_t index, @@ -696,7 +696,7 @@ vuint32m1_t test_vrgather_vv_u32m1(vuint32m1_t op1, vuint32m1_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrgather_vx_u32m1(vuint32m1_t op1, size_t index, size_t vl) { @@ -705,7 +705,7 @@ vuint32m1_t test_vrgather_vx_u32m1(vuint32m1_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vrgather_vv_u32m2(vuint32m2_t op1, vuint32m2_t index, @@ -715,7 +715,7 @@ vuint32m2_t test_vrgather_vv_u32m2(vuint32m2_t op1, vuint32m2_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vrgather_vx_u32m2(vuint32m2_t op1, size_t index, size_t vl) { @@ -724,7 +724,7 @@ vuint32m2_t test_vrgather_vx_u32m2(vuint32m2_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrgather_vv_u32m4(vuint32m4_t op1, vuint32m4_t index, @@ -734,7 +734,7 @@ vuint32m4_t test_vrgather_vv_u32m4(vuint32m4_t op1, vuint32m4_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrgather_vx_u32m4(vuint32m4_t op1, size_t index, size_t vl) { @@ -743,7 +743,7 @@ vuint32m4_t test_vrgather_vx_u32m4(vuint32m4_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrgather_vv_u32m8(vuint32m8_t op1, vuint32m8_t index, @@ -753,7 +753,7 @@ vuint32m8_t test_vrgather_vv_u32m8(vuint32m8_t op1, vuint32m8_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vrgather.vx.nxv16i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrgather_vx_u32m8(vuint32m8_t op1, size_t index, size_t vl) { @@ -762,7 +762,7 @@ vuint32m8_t test_vrgather_vx_u32m8(vuint32m8_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrgather_vv_u64m1(vuint64m1_t op1, vuint64m1_t index, @@ -772,7 +772,7 @@ vuint64m1_t test_vrgather_vv_u64m1(vuint64m1_t op1, vuint64m1_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrgather_vx_u64m1(vuint64m1_t op1, size_t index, size_t vl) { @@ -781,7 +781,7 @@ vuint64m1_t test_vrgather_vx_u64m1(vuint64m1_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vrgather_vv_u64m2(vuint64m2_t op1, vuint64m2_t index, @@ -791,7 +791,7 @@ vuint64m2_t test_vrgather_vv_u64m2(vuint64m2_t op1, vuint64m2_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vrgather_vx_u64m2(vuint64m2_t op1, size_t index, size_t vl) { @@ -800,7 +800,7 @@ vuint64m2_t test_vrgather_vx_u64m2(vuint64m2_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vrgather_vv_u64m4(vuint64m4_t op1, vuint64m4_t index, @@ -810,7 +810,7 @@ vuint64m4_t test_vrgather_vv_u64m4(vuint64m4_t op1, vuint64m4_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vrgather_vx_u64m4(vuint64m4_t op1, size_t index, size_t vl) { @@ 
-819,7 +819,7 @@ vuint64m4_t test_vrgather_vx_u64m4(vuint64m4_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vrgather_vv_u64m8(vuint64m8_t op1, vuint64m8_t index, @@ -829,7 +829,7 @@ vuint64m8_t test_vrgather_vv_u64m8(vuint64m8_t op1, vuint64m8_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vrgather_vx_u64m8(vuint64m8_t op1, size_t index, size_t vl) { @@ -838,7 +838,7 @@ vuint64m8_t test_vrgather_vx_u64m8(vuint64m8_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1f32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1f32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vrgather_vv_f32mf2(vfloat32mf2_t op1, vuint32mf2_t index, @@ -848,7 +848,7 @@ vfloat32mf2_t test_vrgather_vv_f32mf2(vfloat32mf2_t op1, vuint32mf2_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1f32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1f32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vrgather_vx_f32mf2(vfloat32mf2_t op1, size_t index, @@ -858,7 +858,7 @@ vfloat32mf2_t test_vrgather_vx_f32mf2(vfloat32mf2_t op1, size_t index, // CHECK-RV64-LABEL: @test_vrgather_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2f32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2f32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vrgather_vv_f32m1(vfloat32m1_t op1, vuint32m1_t index, @@ -868,7 +868,7 @@ vfloat32m1_t test_vrgather_vv_f32m1(vfloat32m1_t op1, vuint32m1_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2f32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2f32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vrgather_vx_f32m1(vfloat32m1_t op1, size_t index, size_t vl) { @@ -877,7 +877,7 @@ vfloat32m1_t test_vrgather_vx_f32m1(vfloat32m1_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4f32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrgather.vv.nxv4f32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vrgather_vv_f32m2(vfloat32m2_t op1, vuint32m2_t index, @@ -887,7 +887,7 @@ vfloat32m2_t test_vrgather_vv_f32m2(vfloat32m2_t op1, vuint32m2_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4f32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4f32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vrgather_vx_f32m2(vfloat32m2_t op1, size_t index, size_t vl) { @@ -896,7 +896,7 @@ vfloat32m2_t test_vrgather_vx_f32m2(vfloat32m2_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8f32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8f32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vrgather_vv_f32m4(vfloat32m4_t op1, vuint32m4_t index, @@ -906,7 +906,7 @@ vfloat32m4_t test_vrgather_vv_f32m4(vfloat32m4_t op1, vuint32m4_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8f32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8f32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vrgather_vx_f32m4(vfloat32m4_t op1, size_t index, size_t vl) { @@ -915,7 +915,7 @@ vfloat32m4_t test_vrgather_vx_f32m4(vfloat32m4_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16f32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16f32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vrgather_vv_f32m8(vfloat32m8_t op1, vuint32m8_t index, @@ -925,7 +925,7 @@ vfloat32m8_t test_vrgather_vv_f32m8(vfloat32m8_t op1, vuint32m8_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16f32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16f32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vrgather_vx_f32m8(vfloat32m8_t op1, size_t index, size_t vl) { @@ -934,7 +934,7 @@ vfloat32m8_t test_vrgather_vx_f32m8(vfloat32m8_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1f64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1f64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vrgather_vv_f64m1(vfloat64m1_t op1, vuint64m1_t index, @@ -944,7 +944,7 @@ vfloat64m1_t test_vrgather_vv_f64m1(vfloat64m1_t op1, vuint64m1_t index, // CHECK-RV64-LABEL: 
@test_vrgather_vx_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1f64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1f64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vrgather_vx_f64m1(vfloat64m1_t op1, size_t index, size_t vl) { @@ -953,7 +953,7 @@ vfloat64m1_t test_vrgather_vx_f64m1(vfloat64m1_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2f64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2f64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vrgather_vv_f64m2(vfloat64m2_t op1, vuint64m2_t index, @@ -963,7 +963,7 @@ vfloat64m2_t test_vrgather_vv_f64m2(vfloat64m2_t op1, vuint64m2_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2f64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2f64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vrgather_vx_f64m2(vfloat64m2_t op1, size_t index, size_t vl) { @@ -972,7 +972,7 @@ vfloat64m2_t test_vrgather_vx_f64m2(vfloat64m2_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4f64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4f64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vrgather_vv_f64m4(vfloat64m4_t op1, vuint64m4_t index, @@ -982,7 +982,7 @@ vfloat64m4_t test_vrgather_vv_f64m4(vfloat64m4_t op1, vuint64m4_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4f64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4f64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vrgather_vx_f64m4(vfloat64m4_t op1, size_t index, size_t vl) { @@ -991,7 +991,7 @@ vfloat64m4_t test_vrgather_vx_f64m4(vfloat64m4_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8f64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8f64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vrgather_vv_f64m8(vfloat64m8_t op1, vuint64m8_t index, @@ -1001,7 +1001,7 @@ vfloat64m8_t test_vrgather_vv_f64m8(vfloat64m8_t op1, vuint64m8_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8f64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8f64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vrgather_vx_f64m8(vfloat64m8_t op1, size_t index, size_t vl) { @@ -1010,7 +1010,7 @@ vfloat64m8_t test_vrgather_vx_f64m8(vfloat64m8_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrgatherei16_vv_i8mf8(vint8mf8_t op1, vuint16mf4_t op2, @@ -1020,7 +1020,7 @@ vint8mf8_t test_vrgatherei16_vv_i8mf8(vint8mf8_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrgatherei16_vv_i8mf4(vint8mf4_t op1, vuint16mf2_t op2, @@ -1030,7 +1030,7 @@ vint8mf4_t test_vrgatherei16_vv_i8mf4(vint8mf4_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrgatherei16_vv_i8mf2(vint8mf2_t op1, vuint16m1_t op2, @@ -1040,7 +1040,7 @@ vint8mf2_t test_vrgatherei16_vv_i8mf2(vint8mf2_t op1, vuint16m1_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrgatherei16_vv_i8m1(vint8m1_t op1, vuint16m2_t op2, size_t vl) { @@ -1049,7 +1049,7 @@ vint8m1_t test_vrgatherei16_vv_i8m1(vint8m1_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrgatherei16_vv_i8m2(vint8m2_t op1, vuint16m4_t op2, size_t vl) { @@ -1058,7 +1058,7 @@ vint8m2_t test_vrgatherei16_vv_i8m2(vint8m2_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrgatherei16_vv_i8m4(vint8m4_t op1, vuint16m8_t op2, size_t vl) { @@ -1067,7 +1067,7 @@ vint8m4_t test_vrgatherei16_vv_i8m4(vint8m4_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrgatherei16_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, @@ -1077,7 +1077,7 @@ vint16mf4_t test_vrgatherei16_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrgatherei16_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, @@ -1087,7 +1087,7 @@ vint16mf2_t test_vrgatherei16_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrgatherei16_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, @@ -1097,7 +1097,7 @@ vint16m1_t test_vrgatherei16_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrgatherei16_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, @@ -1107,7 +1107,7 @@ vint16m2_t test_vrgatherei16_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrgatherei16_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, @@ -1117,7 +1117,7 @@ vint16m4_t test_vrgatherei16_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrgatherei16_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, @@ -1127,7 +1127,7 @@ vint16m8_t test_vrgatherei16_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrgatherei16_vv_i32mf2(vint32mf2_t 
op1, vuint16mf4_t op2, @@ -1137,7 +1137,7 @@ vint32mf2_t test_vrgatherei16_vv_i32mf2(vint32mf2_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrgatherei16_vv_i32m1(vint32m1_t op1, vuint16mf2_t op2, @@ -1147,7 +1147,7 @@ vint32m1_t test_vrgatherei16_vv_i32m1(vint32m1_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrgatherei16_vv_i32m2(vint32m2_t op1, vuint16m1_t op2, @@ -1157,7 +1157,7 @@ vint32m2_t test_vrgatherei16_vv_i32m2(vint32m2_t op1, vuint16m1_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrgatherei16_vv_i32m4(vint32m4_t op1, vuint16m2_t op2, @@ -1167,7 +1167,7 @@ vint32m4_t test_vrgatherei16_vv_i32m4(vint32m4_t op1, vuint16m2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrgatherei16_vv_i32m8(vint32m8_t op1, vuint16m4_t op2, @@ -1177,7 +1177,7 @@ vint32m8_t test_vrgatherei16_vv_i32m8(vint32m8_t op1, vuint16m4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrgatherei16_vv_i64m1(vint64m1_t op1, vuint16mf4_t op2, @@ -1187,7 +1187,7 @@ vint64m1_t test_vrgatherei16_vv_i64m1(vint64m1_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrgatherei16_vv_i64m2(vint64m2_t op1, vuint16mf2_t op2, @@ -1197,7 +1197,7 @@ vint64m2_t test_vrgatherei16_vv_i64m2(vint64m2_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vrgatherei16.vv.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrgatherei16_vv_i64m4(vint64m4_t op1, vuint16m1_t op2, @@ -1207,7 +1207,7 @@ vint64m4_t test_vrgatherei16_vv_i64m4(vint64m4_t op1, vuint16m1_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrgatherei16_vv_i64m8(vint64m8_t op1, vuint16m2_t op2, @@ -1217,7 +1217,7 @@ vint64m8_t test_vrgatherei16_vv_i64m8(vint64m8_t op1, vuint16m2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrgatherei16_vv_u8mf8(vuint8mf8_t op1, vuint16mf4_t op2, @@ -1227,7 +1227,7 @@ vuint8mf8_t test_vrgatherei16_vv_u8mf8(vuint8mf8_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrgatherei16_vv_u8mf4(vuint8mf4_t op1, vuint16mf2_t op2, @@ -1237,7 +1237,7 @@ vuint8mf4_t test_vrgatherei16_vv_u8mf4(vuint8mf4_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrgatherei16_vv_u8mf2(vuint8mf2_t op1, vuint16m1_t op2, @@ -1247,7 +1247,7 @@ vuint8mf2_t test_vrgatherei16_vv_u8mf2(vuint8mf2_t op1, vuint16m1_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vrgatherei16_vv_u8m1(vuint8m1_t op1, vuint16m2_t op2, @@ -1257,7 +1257,7 @@ vuint8m1_t test_vrgatherei16_vv_u8m1(vuint8m1_t op1, vuint16m2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vrgatherei16_vv_u8m2(vuint8m2_t op1, vuint16m4_t op2, @@ -1267,7 +1267,7 @@ vuint8m2_t test_vrgatherei16_vv_u8m2(vuint8m2_t op1, vuint16m4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m4( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrgatherei16_vv_u8m4(vuint8m4_t op1, vuint16m8_t op2, @@ -1277,7 +1277,7 @@ vuint8m4_t test_vrgatherei16_vv_u8m4(vuint8m4_t op1, vuint16m8_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrgatherei16_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, @@ -1287,7 +1287,7 @@ vuint16mf4_t test_vrgatherei16_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrgatherei16_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, @@ -1297,7 +1297,7 @@ vuint16mf2_t test_vrgatherei16_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrgatherei16_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, @@ -1307,7 +1307,7 @@ vuint16m1_t test_vrgatherei16_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrgatherei16_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, @@ -1317,7 +1317,7 @@ vuint16m2_t test_vrgatherei16_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrgatherei16_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, @@ -1327,7 +1327,7 @@ vuint16m4_t test_vrgatherei16_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint16m8_t test_vrgatherei16_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, @@ -1337,7 +1337,7 @@ vuint16m8_t test_vrgatherei16_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vrgatherei16_vv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, @@ -1347,7 +1347,7 @@ vuint32mf2_t test_vrgatherei16_vv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrgatherei16_vv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, @@ -1357,7 +1357,7 @@ vuint32m1_t test_vrgatherei16_vv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vrgatherei16_vv_u32m2(vuint32m2_t op1, vuint16m1_t op2, @@ -1367,7 +1367,7 @@ vuint32m2_t test_vrgatherei16_vv_u32m2(vuint32m2_t op1, vuint16m1_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrgatherei16_vv_u32m4(vuint32m4_t op1, vuint16m2_t op2, @@ -1377,7 +1377,7 @@ vuint32m4_t test_vrgatherei16_vv_u32m4(vuint32m4_t op1, vuint16m2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrgatherei16_vv_u32m8(vuint32m8_t op1, vuint16m4_t op2, @@ -1387,7 +1387,7 @@ vuint32m8_t test_vrgatherei16_vv_u32m8(vuint32m8_t op1, vuint16m4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrgatherei16_vv_u64m1(vuint64m1_t op1, vuint16mf4_t op2, @@ -1397,7 +1397,7 @@ vuint64m1_t test_vrgatherei16_vv_u64m1(vuint64m1_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrgatherei16.vv.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vrgatherei16_vv_u64m2(vuint64m2_t op1, vuint16mf2_t op2, @@ -1407,7 +1407,7 @@ vuint64m2_t test_vrgatherei16_vv_u64m2(vuint64m2_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vrgatherei16_vv_u64m4(vuint64m4_t op1, vuint16m1_t op2, @@ -1417,7 +1417,7 @@ vuint64m4_t test_vrgatherei16_vv_u64m4(vuint64m4_t op1, vuint16m1_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vrgatherei16_vv_u64m8(vuint64m8_t op1, vuint16m2_t op2, @@ -1427,7 +1427,7 @@ vuint64m8_t test_vrgatherei16_vv_u64m8(vuint64m8_t op1, vuint16m2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vrgatherei16_vv_f32mf2(vfloat32mf2_t op1, vuint16mf4_t op2, @@ -1437,7 +1437,7 @@ vfloat32mf2_t test_vrgatherei16_vv_f32mf2(vfloat32mf2_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vrgatherei16_vv_f32m1(vfloat32m1_t op1, vuint16mf2_t op2, @@ -1447,7 +1447,7 @@ vfloat32m1_t test_vrgatherei16_vv_f32m1(vfloat32m1_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vrgatherei16_vv_f32m2(vfloat32m2_t op1, vuint16m1_t op2, @@ -1457,7 +1457,7 @@ vfloat32m2_t test_vrgatherei16_vv_f32m2(vfloat32m2_t op1, vuint16m1_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vrgatherei16_vv_f32m4(vfloat32m4_t op1, 
vuint16m2_t op2, @@ -1467,7 +1467,7 @@ vfloat32m4_t test_vrgatherei16_vv_f32m4(vfloat32m4_t op1, vuint16m2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vrgatherei16_vv_f32m8(vfloat32m8_t op1, vuint16m4_t op2, @@ -1477,7 +1477,7 @@ vfloat32m8_t test_vrgatherei16_vv_f32m8(vfloat32m8_t op1, vuint16m4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vrgatherei16_vv_f64m1(vfloat64m1_t op1, vuint16mf4_t op2, @@ -1487,7 +1487,7 @@ vfloat64m1_t test_vrgatherei16_vv_f64m1(vfloat64m1_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vrgatherei16_vv_f64m2(vfloat64m2_t op1, vuint16mf2_t op2, @@ -1497,7 +1497,7 @@ vfloat64m2_t test_vrgatherei16_vv_f64m2(vfloat64m2_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vrgatherei16_vv_f64m4(vfloat64m4_t op1, vuint16m1_t op2, @@ -1507,7 +1507,7 @@ vfloat64m4_t test_vrgatherei16_vv_f64m4(vfloat64m4_t op1, vuint16m1_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vrgatherei16_vv_f64m8(vfloat64m8_t op1, vuint16m2_t op2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c index d313ab0..e5d8565 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vrsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // 
CHECK-RV64-LABEL: @test_vrsub_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ vint8mf4_t test_vrsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -33,7 +33,7 @@ vint8mf2_t test_vrsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ vint8m1_t test_vrsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -51,7 +51,7 @@ vint8m2_t test_vrsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ vint8m4_t test_vrsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -69,7 +69,7 @@ vint8m8_t test_vrsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -78,7 +78,7 @@ vint16mf4_t test_vrsub_vx_i16mf4(vint16mf4_t op1, 
int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -87,7 +87,7 @@ vint16mf2_t test_vrsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint16m1_t test_vrsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -105,7 +105,7 @@ vint16m2_t test_vrsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint16m4_t test_vrsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint16m8_t test_vrsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint32mf2_t test_vrsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t 
test_vrsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint32m1_t test_vrsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint32m2_t test_vrsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint32m4_t test_vrsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint32m8_t test_vrsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint64m1_t test_vrsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint64m2_t test_vrsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint64m4_t test_vrsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrsub.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint64m8_t test_vrsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -213,7 +213,7 @@ vuint8mf8_t test_vrsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -222,7 +222,7 @@ vuint8mf4_t test_vrsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -231,7 +231,7 @@ vuint8mf2_t test_vrsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vrsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -240,7 +240,7 @@ vuint8m1_t test_vrsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vrsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -249,7 +249,7 @@ vuint8m2_t test_vrsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -258,7 +258,7 @@ vuint8m4_t test_vrsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i64( 
[[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vrsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -267,7 +267,7 @@ vuint8m8_t test_vrsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -276,7 +276,7 @@ vuint16mf4_t test_vrsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -285,7 +285,7 @@ vuint16mf2_t test_vrsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -294,7 +294,7 @@ vuint16m1_t test_vrsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -303,7 +303,7 @@ vuint16m2_t test_vrsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -312,7 +312,7 @@ vuint16m4_t test_vrsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vrsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -321,7 +321,7 @@ vuint16m8_t test_vrsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, 
size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vrsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -330,7 +330,7 @@ vuint32mf2_t test_vrsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -339,7 +339,7 @@ vuint32m1_t test_vrsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vrsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -348,7 +348,7 @@ vuint32m2_t test_vrsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -357,7 +357,7 @@ vuint32m4_t test_vrsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -366,7 +366,7 @@ vuint32m8_t test_vrsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -375,7 +375,7 @@ vuint64m1_t test_vrsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
 vuint64m2_t test_vrsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vuint64m2_t test_vrsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrsub_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vrsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vuint64m4_t test_vrsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vrsub_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vrsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c
index a541857..627b5c9 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c
@@ -6,7 +6,7 @@
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vsadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vsadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vsadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vsadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vsadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vsadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vsadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vsadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
 // CHECK-RV64-LABEL:
@test_vsadd_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ vint8mf2_t test_vsadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vsadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vsadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vsadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vsadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vsadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vsadd_vv_i8m4(vint8m4_t op1, 
vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vsadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint8m8_t test_vsadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint8m8_t test_vsadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint16mf4_t test_vsadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4_t test_vsadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint16mf2_t test_vsadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t 
test_vsadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint16mf2_t test_vsadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint16m1_t test_vsadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint16m1_t test_vsadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint16m2_t test_vsadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint16m2_t test_vsadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint16m4_t test_vsadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint16m4_t test_vsadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vsadd.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint16m8_t test_vsadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint16m8_t test_vsadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vsadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vsadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vsadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vsadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ vint32m2_t test_vsadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i32m2( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ vint32m2_t test_vsadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ vint32m4_t test_vsadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ vint32m4_t test_vsadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ vint32m8_t test_vsadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ vint32m8_t test_vsadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ vint64m1_t test_vsadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ vint64m1_t 
test_vsadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ vint64m2_t test_vsadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ vint64m2_t test_vsadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ vint64m4_t test_vsadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ vint64m4_t test_vsadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ vint64m8_t test_vsadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ vint64m8_t test_vsadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ vuint8mf8_t test_vsaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ vuint8mf8_t test_vsaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ vuint8mf4_t test_vsaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ vuint8mf4_t test_vsaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ vuint8mf2_t test_vsaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ vuint8mf2_t test_vsaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ vuint8m1_t test_vsaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.i8.i64( [[OP1:%.*]], i8 
[[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ vuint8m1_t test_vsaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ vuint8m2_t test_vsaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ vuint8m2_t test_vsaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ vuint8m4_t test_vsaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ vuint8m4_t test_vsaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ vuint8m8_t test_vsaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ vuint8m8_t test_vsaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: 
@test_vsaddu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, @@ -538,7 +538,7 @@ vuint16mf4_t test_vsaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -547,7 +547,7 @@ vuint16mf4_t test_vsaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, @@ -557,7 +557,7 @@ vuint16mf2_t test_vsaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -566,7 +566,7 @@ vuint16mf2_t test_vsaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -575,7 +575,7 @@ vuint16m1_t test_vsaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -584,7 +584,7 @@ vuint16m1_t test_vsaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t 
test_vsaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -593,7 +593,7 @@ vuint16m2_t test_vsaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -602,7 +602,7 @@ vuint16m2_t test_vsaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -611,7 +611,7 @@ vuint16m4_t test_vsaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -620,7 +620,7 @@ vuint16m4_t test_vsaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -629,7 +629,7 @@ vuint16m8_t test_vsaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -638,7 +638,7 @@ vuint16m8_t test_vsaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, @@ -648,7 +648,7 @@ vuint32mf2_t test_vsaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, // CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.i32.i64( [[OP1:%.*]], i32 
[[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -657,7 +657,7 @@ vuint32mf2_t test_vsaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -666,7 +666,7 @@ vuint32m1_t test_vsaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -675,7 +675,7 @@ vuint32m1_t test_vsaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -684,7 +684,7 @@ vuint32m2_t test_vsaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -693,7 +693,7 @@ vuint32m2_t test_vsaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -702,7 +702,7 @@ vuint32m4_t test_vsaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -711,7 +711,7 @@ vuint32m4_t 
test_vsaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -720,7 +720,7 @@ vuint32m8_t test_vsaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -729,7 +729,7 @@ vuint32m8_t test_vsaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -738,7 +738,7 @@ vuint64m1_t test_vsaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -747,7 +747,7 @@ vuint64m1_t test_vsaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -756,7 +756,7 @@ vuint64m2_t test_vsaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -765,7 +765,7 @@ vuint64m2_t test_vsaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsaddu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -774,7 +774,7 @@ vuint64m4_t test_vsaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -783,7 +783,7 @@ vuint64m4_t test_vsaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -792,7 +792,7 @@ vuint64m8_t test_vsaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1down.c index 0e200dc..b935758 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1down.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1down.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslide1down_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vslide1down_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslide1down_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) { @@ -24,7 +24,7 @@ vint8mf4_t test_vslide1down_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i8.i8.i64( undef, [[SRC:%.*]], 
i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslide1down_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) { @@ -33,7 +33,7 @@ vint8mf2_t test_vslide1down_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslide1down_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) { @@ -42,7 +42,7 @@ vint8m1_t test_vslide1down_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslide1down_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) { @@ -51,7 +51,7 @@ vint8m2_t test_vslide1down_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslide1down_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) { @@ -60,7 +60,7 @@ vint8m4_t test_vslide1down_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv64i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv64i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslide1down_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) { @@ -69,7 +69,7 @@ vint8m8_t test_vslide1down_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslide1down_vx_i16mf4(vint16mf4_t src, int16_t value, @@ -79,7 +79,7 @@ vint16mf4_t test_vslide1down_vx_i16mf4(vint16mf4_t src, int16_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslide1down_vx_i16mf2(vint16mf2_t src, int16_t value, @@ -89,7 +89,7 @@ vint16mf2_t test_vslide1down_vx_i16mf2(vint16mf2_t src, int16_t value, // CHECK-RV64-LABEL: 
@test_vslide1down_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslide1down_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) { @@ -98,7 +98,7 @@ vint16m1_t test_vslide1down_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslide1down_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) { @@ -107,7 +107,7 @@ vint16m2_t test_vslide1down_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslide1down_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) { @@ -116,7 +116,7 @@ vint16m4_t test_vslide1down_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslide1down_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) { @@ -125,7 +125,7 @@ vint16m8_t test_vslide1down_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslide1down_vx_i32mf2(vint32mf2_t src, int32_t value, @@ -135,7 +135,7 @@ vint32mf2_t test_vslide1down_vx_i32mf2(vint32mf2_t src, int32_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslide1down_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) { @@ -144,7 +144,7 @@ vint32m1_t test_vslide1down_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslide1down_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) { @@ -153,7 +153,7 @@ vint32m2_t test_vslide1down_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslide1down_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) { @@ -162,7 +162,7 @@ vint32m4_t test_vslide1down_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslide1down_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) { @@ -171,7 +171,7 @@ vint32m8_t test_vslide1down_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslide1down_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) { @@ -180,7 +180,7 @@ vint64m1_t test_vslide1down_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslide1down_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) { @@ -189,7 +189,7 @@ vint64m2_t test_vslide1down_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslide1down_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) { @@ -198,7 +198,7 @@ vint64m4_t test_vslide1down_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t 
test_vslide1down_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) { @@ -207,7 +207,7 @@ vint64m8_t test_vslide1down_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslide1down_vx_u8mf8(vuint8mf8_t src, uint8_t value, @@ -217,7 +217,7 @@ vuint8mf8_t test_vslide1down_vx_u8mf8(vuint8mf8_t src, uint8_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslide1down_vx_u8mf4(vuint8mf4_t src, uint8_t value, @@ -227,7 +227,7 @@ vuint8mf4_t test_vslide1down_vx_u8mf4(vuint8mf4_t src, uint8_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslide1down_vx_u8mf2(vuint8mf2_t src, uint8_t value, @@ -237,7 +237,7 @@ vuint8mf2_t test_vslide1down_vx_u8mf2(vuint8mf2_t src, uint8_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslide1down_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) { @@ -246,7 +246,7 @@ vuint8m1_t test_vslide1down_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslide1down_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) { @@ -255,7 +255,7 @@ vuint8m2_t test_vslide1down_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslide1down_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) { @@ -264,7 +264,7 @@ vuint8m4_t test_vslide1down_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslide1down.nxv64i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv64i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslide1down_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) { @@ -273,7 +273,7 @@ vuint8m8_t test_vslide1down_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslide1down_vx_u16mf4(vuint16mf4_t src, uint16_t value, @@ -283,7 +283,7 @@ vuint16mf4_t test_vslide1down_vx_u16mf4(vuint16mf4_t src, uint16_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslide1down_vx_u16mf2(vuint16mf2_t src, uint16_t value, @@ -293,7 +293,7 @@ vuint16mf2_t test_vslide1down_vx_u16mf2(vuint16mf2_t src, uint16_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslide1down_vx_u16m1(vuint16m1_t src, uint16_t value, @@ -303,7 +303,7 @@ vuint16m1_t test_vslide1down_vx_u16m1(vuint16m1_t src, uint16_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslide1down_vx_u16m2(vuint16m2_t src, uint16_t value, @@ -313,7 +313,7 @@ vuint16m2_t test_vslide1down_vx_u16m2(vuint16m2_t src, uint16_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslide1down_vx_u16m4(vuint16m4_t src, uint16_t value, @@ -323,7 +323,7 @@ vuint16m4_t test_vslide1down_vx_u16m4(vuint16m4_t src, uint16_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t 
test_vslide1down_vx_u16m8(vuint16m8_t src, uint16_t value, @@ -333,7 +333,7 @@ vuint16m8_t test_vslide1down_vx_u16m8(vuint16m8_t src, uint16_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslide1down_vx_u32mf2(vuint32mf2_t src, uint32_t value, @@ -343,7 +343,7 @@ vuint32mf2_t test_vslide1down_vx_u32mf2(vuint32mf2_t src, uint32_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslide1down_vx_u32m1(vuint32m1_t src, uint32_t value, @@ -353,7 +353,7 @@ vuint32m1_t test_vslide1down_vx_u32m1(vuint32m1_t src, uint32_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslide1down_vx_u32m2(vuint32m2_t src, uint32_t value, @@ -363,7 +363,7 @@ vuint32m2_t test_vslide1down_vx_u32m2(vuint32m2_t src, uint32_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslide1down_vx_u32m4(vuint32m4_t src, uint32_t value, @@ -373,7 +373,7 @@ vuint32m4_t test_vslide1down_vx_u32m4(vuint32m4_t src, uint32_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslide1down_vx_u32m8(vuint32m8_t src, uint32_t value, @@ -383,7 +383,7 @@ vuint32m8_t test_vslide1down_vx_u32m8(vuint32m8_t src, uint32_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslide1down_vx_u64m1(vuint64m1_t src, uint64_t value, @@ -393,7 +393,7 @@ vuint64m1_t test_vslide1down_vx_u64m1(vuint64m1_t src, uint64_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslide1down.nxv2i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslide1down_vx_u64m2(vuint64m2_t src, uint64_t value, @@ -403,7 +403,7 @@ vuint64m2_t test_vslide1down_vx_u64m2(vuint64m2_t src, uint64_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslide1down_vx_u64m4(vuint64m4_t src, uint64_t value, @@ -413,7 +413,7 @@ vuint64m4_t test_vslide1down_vx_u64m4(vuint64m4_t src, uint64_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslide1down_vx_u64m8(vuint64m8_t src, uint64_t value, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1up.c index 7d8056e..bbd6d65 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1up.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1up.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslide1up_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vslide1up_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslide1up_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) { @@ -24,7 +24,7 @@ vint8mf4_t test_vslide1up_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslide1up_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) { @@ -33,7 +33,7 @@ vint8mf2_t test_vslide1up_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslide1up_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) { @@ -42,7 +42,7 @@ vint8m1_t test_vslide1up_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslide1up_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) { @@ -51,7 +51,7 @@ vint8m2_t test_vslide1up_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslide1up_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) { @@ -60,7 +60,7 @@ vint8m4_t test_vslide1up_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv64i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv64i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslide1up_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) { @@ -69,7 +69,7 @@ vint8m8_t test_vslide1up_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslide1up_vx_i16mf4(vint16mf4_t src, int16_t value, @@ -79,7 +79,7 @@ vint16mf4_t test_vslide1up_vx_i16mf4(vint16mf4_t src, int16_t value, // CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslide1up_vx_i16mf2(vint16mf2_t src, int16_t value, @@ -89,7 +89,7 @@ vint16mf2_t test_vslide1up_vx_i16mf2(vint16mf2_t src, int16_t value, // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslide1up_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) { @@ -98,7 +98,7 @@ vint16m1_t test_vslide1up_vx_i16m1(vint16m1_t src, int16_t value, 
size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslide1up_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) { @@ -107,7 +107,7 @@ vint16m2_t test_vslide1up_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslide1up_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) { @@ -116,7 +116,7 @@ vint16m4_t test_vslide1up_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslide1up_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) { @@ -125,7 +125,7 @@ vint16m8_t test_vslide1up_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslide1up_vx_i32mf2(vint32mf2_t src, int32_t value, @@ -135,7 +135,7 @@ vint32mf2_t test_vslide1up_vx_i32mf2(vint32mf2_t src, int32_t value, // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslide1up_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) { @@ -144,7 +144,7 @@ vint32m1_t test_vslide1up_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslide1up_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) { @@ -153,7 +153,7 @@ vint32m2_t test_vslide1up_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslide1up.nxv8i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslide1up_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) { @@ -162,7 +162,7 @@ vint32m4_t test_vslide1up_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslide1up_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) { @@ -171,7 +171,7 @@ vint32m8_t test_vslide1up_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslide1up_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) { @@ -180,7 +180,7 @@ vint64m1_t test_vslide1up_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslide1up_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) { @@ -189,7 +189,7 @@ vint64m2_t test_vslide1up_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslide1up_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) { @@ -198,7 +198,7 @@ vint64m4_t test_vslide1up_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslide1up_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) { @@ -207,7 +207,7 @@ vint64m8_t test_vslide1up_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslide1up_vx_u8mf8(vuint8mf8_t src, uint8_t value, size_t vl) { @@ -216,7 +216,7 @@ vuint8mf8_t 
test_vslide1up_vx_u8mf8(vuint8mf8_t src, uint8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslide1up_vx_u8mf4(vuint8mf4_t src, uint8_t value, size_t vl) { @@ -225,7 +225,7 @@ vuint8mf4_t test_vslide1up_vx_u8mf4(vuint8mf4_t src, uint8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslide1up_vx_u8mf2(vuint8mf2_t src, uint8_t value, size_t vl) { @@ -234,7 +234,7 @@ vuint8mf2_t test_vslide1up_vx_u8mf2(vuint8mf2_t src, uint8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslide1up_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) { @@ -243,7 +243,7 @@ vuint8m1_t test_vslide1up_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslide1up_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) { @@ -252,7 +252,7 @@ vuint8m2_t test_vslide1up_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslide1up_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) { @@ -261,7 +261,7 @@ vuint8m4_t test_vslide1up_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv64i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv64i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslide1up_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) { @@ -270,7 +270,7 @@ vuint8m8_t test_vslide1up_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslide1up_vx_u16mf4(vuint16mf4_t src, uint16_t value, @@ -280,7 +280,7 @@ vuint16mf4_t test_vslide1up_vx_u16mf4(vuint16mf4_t src, uint16_t value, // CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslide1up_vx_u16mf2(vuint16mf2_t src, uint16_t value, @@ -290,7 +290,7 @@ vuint16mf2_t test_vslide1up_vx_u16mf2(vuint16mf2_t src, uint16_t value, // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslide1up_vx_u16m1(vuint16m1_t src, uint16_t value, @@ -300,7 +300,7 @@ vuint16m1_t test_vslide1up_vx_u16m1(vuint16m1_t src, uint16_t value, // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslide1up_vx_u16m2(vuint16m2_t src, uint16_t value, @@ -310,7 +310,7 @@ vuint16m2_t test_vslide1up_vx_u16m2(vuint16m2_t src, uint16_t value, // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslide1up_vx_u16m4(vuint16m4_t src, uint16_t value, @@ -320,7 +320,7 @@ vuint16m4_t test_vslide1up_vx_u16m4(vuint16m4_t src, uint16_t value, // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslide1up_vx_u16m8(vuint16m8_t src, uint16_t value, @@ -330,7 +330,7 @@ vuint16m8_t test_vslide1up_vx_u16m8(vuint16m8_t src, uint16_t value, // CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslide1up_vx_u32mf2(vuint32mf2_t src, uint32_t value, @@ -340,7 +340,7 @@ vuint32mf2_t test_vslide1up_vx_u32mf2(vuint32mf2_t src, uint32_t value, // 
CHECK-RV64-LABEL: @test_vslide1up_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslide1up_vx_u32m1(vuint32m1_t src, uint32_t value, @@ -350,7 +350,7 @@ vuint32m1_t test_vslide1up_vx_u32m1(vuint32m1_t src, uint32_t value, // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslide1up_vx_u32m2(vuint32m2_t src, uint32_t value, @@ -360,7 +360,7 @@ vuint32m2_t test_vslide1up_vx_u32m2(vuint32m2_t src, uint32_t value, // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslide1up_vx_u32m4(vuint32m4_t src, uint32_t value, @@ -370,7 +370,7 @@ vuint32m4_t test_vslide1up_vx_u32m4(vuint32m4_t src, uint32_t value, // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslide1up_vx_u32m8(vuint32m8_t src, uint32_t value, @@ -380,7 +380,7 @@ vuint32m8_t test_vslide1up_vx_u32m8(vuint32m8_t src, uint32_t value, // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslide1up_vx_u64m1(vuint64m1_t src, uint64_t value, @@ -390,7 +390,7 @@ vuint64m1_t test_vslide1up_vx_u64m1(vuint64m1_t src, uint64_t value, // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslide1up_vx_u64m2(vuint64m2_t src, uint64_t value, @@ -400,7 +400,7 @@ vuint64m2_t test_vslide1up_vx_u64m2(vuint64m2_t src, uint64_t value, // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] 
// vuint64m4_t test_vslide1up_vx_u64m4(vuint64m4_t src, uint64_t value, @@ -410,7 +410,7 @@ vuint64m4_t test_vslide1up_vx_u64m4(vuint64m4_t src, uint64_t value, // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslide1up_vx_u64m8(vuint64m8_t src, uint64_t value, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c index 5101560..d119318 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsll_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vsll_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsll_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) { @@ -24,7 +24,7 @@ vint8mf8_t test_vsll_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsll_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { @@ -33,7 +33,7 @@ vint8mf4_t test_vsll_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsll_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) { @@ -42,7 +42,7 @@ vint8mf4_t test_vsll_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsll_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { @@ -51,7 +51,7 @@ vint8mf2_t test_vsll_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { // CHECK-RV64-LABEL: 
@test_vsll_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsll_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vsll_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsll_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vsll_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsll_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vsll_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsll_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vsll_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsll_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vsll_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsll_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vsll_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsll_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t 
test_vsll_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsll_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) { @@ -123,7 +123,7 @@ vint8m8_t test_vsll_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsll_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) { @@ -132,7 +132,7 @@ vint8m8_t test_vsll_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsll_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { @@ -141,7 +141,7 @@ vint16mf4_t test_vsll_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) // CHECK-RV64-LABEL: @test_vsll_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsll_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4_t test_vsll_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsll_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { @@ -159,7 +159,7 @@ vint16mf2_t test_vsll_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) // CHECK-RV64-LABEL: @test_vsll_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsll_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) { @@ -168,7 +168,7 @@ vint16mf2_t test_vsll_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsll_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) { @@ -177,7 +177,7 @@ vint16m1_t test_vsll_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsll_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) { @@ -186,7 +186,7 @@ vint16m1_t test_vsll_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsll_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) { @@ -195,7 +195,7 @@ vint16m2_t test_vsll_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsll_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) { @@ -204,7 +204,7 @@ vint16m2_t test_vsll_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsll_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) { @@ -213,7 +213,7 @@ vint16m4_t test_vsll_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsll_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) { @@ -222,7 +222,7 @@ vint16m4_t test_vsll_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsll_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) { @@ -231,7 +231,7 @@ vint16m8_t test_vsll_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsll.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsll_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) { @@ -240,7 +240,7 @@ vint16m8_t test_vsll_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsll_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vsll_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) // CHECK-RV64-LABEL: @test_vsll_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsll_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vsll_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsll_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vsll_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsll_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vsll_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsll_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) { @@ -285,7 +285,7 @@ vint32m2_t test_vsll_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsll_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) { @@ -294,7 +294,7 @@ vint32m2_t 
test_vsll_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsll_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) { @@ -303,7 +303,7 @@ vint32m4_t test_vsll_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsll_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) { @@ -312,7 +312,7 @@ vint32m4_t test_vsll_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsll_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) { @@ -321,7 +321,7 @@ vint32m8_t test_vsll_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsll_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) { @@ -330,7 +330,7 @@ vint32m8_t test_vsll_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsll_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) { @@ -339,7 +339,7 @@ vint64m1_t test_vsll_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsll_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) { @@ -348,7 +348,7 @@ vint64m1_t test_vsll_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsll_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) { @@ -357,7 +357,7 @@ vint64m2_t test_vsll_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsll_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) { @@ -366,7 +366,7 @@ vint64m2_t test_vsll_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsll_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) { @@ -375,7 +375,7 @@ vint64m4_t test_vsll_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsll_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) { @@ -384,7 +384,7 @@ vint64m4_t test_vsll_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsll_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) { @@ -393,7 +393,7 @@ vint64m8_t test_vsll_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsll_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) { @@ -402,7 +402,7 @@ vint64m8_t test_vsll_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsll_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { @@ -411,7 +411,7 @@ vuint8mf8_t test_vsll_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.i64.i64( [[OP1:%.*]], 
i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsll_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) { @@ -420,7 +420,7 @@ vuint8mf8_t test_vsll_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsll_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { @@ -429,7 +429,7 @@ vuint8mf4_t test_vsll_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsll_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) { @@ -438,7 +438,7 @@ vuint8mf4_t test_vsll_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsll_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { @@ -447,7 +447,7 @@ vuint8mf2_t test_vsll_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsll_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) { @@ -456,7 +456,7 @@ vuint8mf2_t test_vsll_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsll_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) { @@ -465,7 +465,7 @@ vuint8m1_t test_vsll_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsll_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) { @@ -474,7 +474,7 @@ vuint8m1_t test_vsll_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: 
@test_vsll_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsll_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) { @@ -483,7 +483,7 @@ vuint8m2_t test_vsll_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsll_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) { @@ -492,7 +492,7 @@ vuint8m2_t test_vsll_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsll_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) { @@ -501,7 +501,7 @@ vuint8m4_t test_vsll_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsll_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) { @@ -510,7 +510,7 @@ vuint8m4_t test_vsll_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsll_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) { @@ -519,7 +519,7 @@ vuint8m8_t test_vsll_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsll_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) { @@ -528,7 +528,7 @@ vuint8m8_t test_vsll_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsll_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t 
shift, size_t vl) { @@ -537,7 +537,7 @@ vuint16mf4_t test_vsll_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl // CHECK-RV64-LABEL: @test_vsll_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsll_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) { @@ -546,7 +546,7 @@ vuint16mf4_t test_vsll_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsll_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { @@ -555,7 +555,7 @@ vuint16mf2_t test_vsll_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl // CHECK-RV64-LABEL: @test_vsll_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsll_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) { @@ -564,7 +564,7 @@ vuint16mf2_t test_vsll_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsll_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) { @@ -573,7 +573,7 @@ vuint16m1_t test_vsll_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsll_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) { @@ -582,7 +582,7 @@ vuint16m1_t test_vsll_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsll_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) { @@ -591,7 +591,7 @@ vuint16m2_t test_vsll_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsll_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) { @@ -600,7 +600,7 @@ vuint16m2_t test_vsll_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsll_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) { @@ -609,7 +609,7 @@ vuint16m4_t test_vsll_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsll_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) { @@ -618,7 +618,7 @@ vuint16m4_t test_vsll_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsll_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) { @@ -627,7 +627,7 @@ vuint16m8_t test_vsll_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsll_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) { @@ -636,7 +636,7 @@ vuint16m8_t test_vsll_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsll_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { @@ -645,7 +645,7 @@ vuint32mf2_t test_vsll_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl // CHECK-RV64-LABEL: @test_vsll_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsll_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) { @@ -654,7 +654,7 @@ vuint32mf2_t test_vsll_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) { // 
CHECK-RV64-LABEL: @test_vsll_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsll_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) { @@ -663,7 +663,7 @@ vuint32m1_t test_vsll_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsll_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) { @@ -672,7 +672,7 @@ vuint32m1_t test_vsll_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsll_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) { @@ -681,7 +681,7 @@ vuint32m2_t test_vsll_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsll_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2_t test_vsll_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsll_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) { @@ -699,7 +699,7 @@ vuint32m4_t test_vsll_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsll_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) { @@ -708,7 +708,7 @@ vuint32m4_t test_vsll_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] 
// vuint32m8_t test_vsll_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) { @@ -717,7 +717,7 @@ vuint32m8_t test_vsll_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsll_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) { @@ -726,7 +726,7 @@ vuint32m8_t test_vsll_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsll_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) { @@ -735,7 +735,7 @@ vuint64m1_t test_vsll_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsll_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) { @@ -744,7 +744,7 @@ vuint64m1_t test_vsll_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsll_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) { @@ -753,7 +753,7 @@ vuint64m2_t test_vsll_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsll_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) { @@ -762,7 +762,7 @@ vuint64m2_t test_vsll_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsll_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) { @@ -771,7 +771,7 @@ vuint64m4_t test_vsll_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.i64.i64( [[OP1:%.*]], i64 
[[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsll_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) { @@ -780,7 +780,7 @@ vuint64m4_t test_vsll_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsll_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) { @@ -789,7 +789,7 @@ vuint64m8_t test_vsll_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsll_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul-eew64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul-eew64.c index dc05b51..f67a014 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul-eew64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul-eew64.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -17,7 +17,7 @@ vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -26,7 +26,7 @@ vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -35,7 +35,7 @@ vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -44,7 +44,7 @@ vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -53,7 +53,7 @@ vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -62,7 +62,7 @@ vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -71,7 +71,7 @@ vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c index 3905826..7a0374c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vsmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ vint8mf8_t test_vsmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, 
size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ vint8mf4_t test_vsmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ vint8mf4_t test_vsmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ vint8mf2_t test_vsmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vsmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vsmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vsmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ 
vint8m2_t test_vsmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vsmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vsmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vsmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint8m8_t test_vsmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint8m8_t test_vsmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint16mf4_t test_vsmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t 
test_vsmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4_t test_vsmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint16mf2_t test_vsmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint16mf2_t test_vsmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint16m1_t test_vsmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint16m1_t test_vsmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint16m2_t test_vsmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint16m2_t test_vsmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint16m4_t test_vsmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint16m4_t test_vsmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint16m8_t test_vsmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint16m8_t test_vsmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vsmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vsmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vsmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i32m1( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vsmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ vint32m2_t test_vsmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ vint32m2_t test_vsmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ vint32m4_t test_vsmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ vint32m4_t test_vsmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ vint32m8_t test_vsmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { diff 
--git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c index c7855bb..6455f38 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vsra_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vsra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) { @@ -24,7 +24,7 @@ vint8mf8_t test_vsra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { @@ -33,7 +33,7 @@ vint8mf4_t test_vsra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) { @@ -42,7 +42,7 @@ vint8mf4_t test_vsra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { @@ -51,7 +51,7 @@ vint8mf2_t test_vsra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vsra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsra.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vsra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vsra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vsra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vsra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vsra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vsra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) { @@ -123,7 +123,7 @@ vint8m8_t test_vsra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) { // CHECK-RV64-LABEL: 
@test_vsra_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv64i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv64i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) { @@ -132,7 +132,7 @@ vint8m8_t test_vsra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { @@ -141,7 +141,7 @@ vint16mf4_t test_vsra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) // CHECK-RV64-LABEL: @test_vsra_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4_t test_vsra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { @@ -159,7 +159,7 @@ vint16mf2_t test_vsra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) // CHECK-RV64-LABEL: @test_vsra_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) { @@ -168,7 +168,7 @@ vint16mf2_t test_vsra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) { @@ -177,7 +177,7 @@ vint16m1_t test_vsra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t 
test_vsra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) { @@ -186,7 +186,7 @@ vint16m1_t test_vsra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) { @@ -195,7 +195,7 @@ vint16m2_t test_vsra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) { @@ -204,7 +204,7 @@ vint16m2_t test_vsra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) { @@ -213,7 +213,7 @@ vint16m4_t test_vsra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) { @@ -222,7 +222,7 @@ vint16m4_t test_vsra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) { @@ -231,7 +231,7 @@ vint16m8_t test_vsra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) { @@ -240,7 +240,7 @@ vint16m8_t test_vsra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vsra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) // CHECK-RV64-LABEL: @test_vsra_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vsra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vsra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vsra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) { @@ -285,7 +285,7 @@ vint32m2_t test_vsra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) { @@ -294,7 +294,7 @@ vint32m2_t test_vsra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) { @@ -303,7 +303,7 @@ vint32m4_t test_vsra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vx_i32m4( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) { @@ -312,7 +312,7 @@ vint32m4_t test_vsra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) { @@ -321,7 +321,7 @@ vint32m8_t test_vsra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) { @@ -330,7 +330,7 @@ vint32m8_t test_vsra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) { @@ -339,7 +339,7 @@ vint64m1_t test_vsra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) { @@ -348,7 +348,7 @@ vint64m1_t test_vsra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) { @@ -357,7 +357,7 @@ vint64m2_t test_vsra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsra_vx_i64m2(vint64m2_t op1, size_t shift, 
size_t vl) { @@ -366,7 +366,7 @@ vint64m2_t test_vsra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) { @@ -375,7 +375,7 @@ vint64m4_t test_vsra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) { @@ -384,7 +384,7 @@ vint64m4_t test_vsra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) { @@ -393,7 +393,7 @@ vint64m8_t test_vsra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c index bf0e538..968377d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { @@ -15,7 +15,7 @@ vuint8mf8_t test_vsrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) { @@ -24,7 +24,7 @@ vuint8mf8_t test_vsrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf4( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { @@ -33,7 +33,7 @@ vuint8mf4_t test_vsrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) { @@ -42,7 +42,7 @@ vuint8mf4_t test_vsrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { @@ -51,7 +51,7 @@ vuint8mf2_t test_vsrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) { @@ -60,7 +60,7 @@ vuint8mf2_t test_vsrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) { @@ -69,7 +69,7 @@ vuint8m1_t test_vsrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) { @@ -78,7 +78,7 @@ vuint8m1_t test_vsrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) { @@ -87,7 +87,7 @@ vuint8m2_t 
test_vsrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) { @@ -96,7 +96,7 @@ vuint8m2_t test_vsrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) { @@ -105,7 +105,7 @@ vuint8m4_t test_vsrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) { @@ -114,7 +114,7 @@ vuint8m4_t test_vsrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) { @@ -123,7 +123,7 @@ vuint8m8_t test_vsrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv64i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv64i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) { @@ -132,7 +132,7 @@ vuint8m8_t test_vsrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { @@ -141,7 +141,7 @@ vuint16mf4_t test_vsrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl // CHECK-RV64-LABEL: @test_vsrl_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) { @@ -150,7 +150,7 @@ vuint16mf4_t test_vsrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { @@ -159,7 +159,7 @@ vuint16mf2_t test_vsrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl // CHECK-RV64-LABEL: @test_vsrl_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) { @@ -168,7 +168,7 @@ vuint16mf2_t test_vsrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) { @@ -177,7 +177,7 @@ vuint16m1_t test_vsrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) { @@ -186,7 +186,7 @@ vuint16m1_t test_vsrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) { @@ -195,7 +195,7 @@ vuint16m2_t test_vsrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) { @@ -204,7 +204,7 @@ vuint16m2_t test_vsrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsrl.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) { @@ -213,7 +213,7 @@ vuint16m4_t test_vsrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) { @@ -222,7 +222,7 @@ vuint16m4_t test_vsrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) { @@ -231,7 +231,7 @@ vuint16m8_t test_vsrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) { @@ -240,7 +240,7 @@ vuint16m8_t test_vsrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { @@ -249,7 +249,7 @@ vuint32mf2_t test_vsrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl // CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) { @@ -258,7 +258,7 @@ vuint32mf2_t test_vsrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) { @@ 
-267,7 +267,7 @@ vuint32m1_t test_vsrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) { @@ -276,7 +276,7 @@ vuint32m1_t test_vsrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) { @@ -285,7 +285,7 @@ vuint32m2_t test_vsrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) { @@ -294,7 +294,7 @@ vuint32m2_t test_vsrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) { @@ -303,7 +303,7 @@ vuint32m4_t test_vsrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) { @@ -312,7 +312,7 @@ vuint32m4_t test_vsrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) { @@ -321,7 +321,7 @@ vuint32m8_t test_vsrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsrl.nxv16i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) { @@ -330,7 +330,7 @@ vuint32m8_t test_vsrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) { @@ -339,7 +339,7 @@ vuint64m1_t test_vsrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) { @@ -348,7 +348,7 @@ vuint64m1_t test_vsrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) { @@ -357,7 +357,7 @@ vuint64m2_t test_vsrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) { @@ -366,7 +366,7 @@ vuint64m2_t test_vsrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) { @@ -375,7 +375,7 @@ vuint64m4_t test_vsrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) { @@ -384,7 +384,7 @@ vuint64m4_t test_vsrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vv_u64m8( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) { @@ -393,7 +393,7 @@ vuint64m8_t test_vsrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c index 2427f37..7ce1fe3 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vssra_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vssra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) { @@ -24,7 +24,7 @@ vint8mf8_t test_vssra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { @@ -33,7 +33,7 @@ vint8mf4_t test_vssra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) { @@ -42,7 +42,7 @@ vint8mf4_t test_vssra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { @@ -51,7 +51,7 @@ vint8mf2_t test_vssra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vssra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vssra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vssra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vssra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vssra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vssra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vx_i8m4( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vssra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) { @@ -123,7 +123,7 @@ vint8m8_t test_vssra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv64i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv64i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) { @@ -132,7 +132,7 @@ vint8m8_t test_vssra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, @@ -142,7 +142,7 @@ vint16mf4_t test_vssra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, // CHECK-RV64-LABEL: @test_vssra_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) { @@ -151,7 +151,7 @@ vint16mf4_t test_vssra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, @@ -161,7 +161,7 @@ vint16mf2_t test_vssra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, // CHECK-RV64-LABEL: @test_vssra_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2_t 
test_vssra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) { @@ -179,7 +179,7 @@ vint16m1_t test_vssra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) { @@ -188,7 +188,7 @@ vint16m1_t test_vssra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) { @@ -197,7 +197,7 @@ vint16m2_t test_vssra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) { @@ -206,7 +206,7 @@ vint16m2_t test_vssra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) { @@ -215,7 +215,7 @@ vint16m4_t test_vssra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) { @@ -224,7 +224,7 @@ vint16m4_t test_vssra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i16.nxv32i16.i64( 
undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) { @@ -233,7 +233,7 @@ vint16m8_t test_vssra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) { @@ -242,7 +242,7 @@ vint16m8_t test_vssra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, @@ -252,7 +252,7 @@ vint32mf2_t test_vssra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, // CHECK-RV64-LABEL: @test_vssra_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) { @@ -261,7 +261,7 @@ vint32mf2_t test_vssra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) { @@ -270,7 +270,7 @@ vint32m1_t test_vssra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) { @@ -279,7 +279,7 @@ vint32m1_t test_vssra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) { @@ -288,7 +288,7 @@ vint32m2_t test_vssra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) { @@ -297,7 +297,7 @@ vint32m2_t test_vssra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) { @@ -306,7 +306,7 @@ vint32m4_t test_vssra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) { @@ -315,7 +315,7 @@ vint32m4_t test_vssra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) { @@ -324,7 +324,7 @@ vint32m8_t test_vssra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) { @@ -333,7 +333,7 @@ vint32m8_t test_vssra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) { @@ -342,7 +342,7 @@ vint64m1_t test_vssra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) { 
@@ -351,7 +351,7 @@ vint64m1_t test_vssra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) { @@ -360,7 +360,7 @@ vint64m2_t test_vssra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) { @@ -369,7 +369,7 @@ vint64m2_t test_vssra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) { @@ -378,7 +378,7 @@ vint64m4_t test_vssra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) { @@ -387,7 +387,7 @@ vint64m4_t test_vssra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) { @@ -396,7 +396,7 @@ vint64m8_t test_vssra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c index d393fa1..d723aa4 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: 
@test_vssrl_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { @@ -15,7 +15,7 @@ vuint8mf8_t test_vssrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) { @@ -24,7 +24,7 @@ vuint8mf8_t test_vssrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { @@ -33,7 +33,7 @@ vuint8mf4_t test_vssrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) { @@ -42,7 +42,7 @@ vuint8mf4_t test_vssrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { @@ -51,7 +51,7 @@ vuint8mf2_t test_vssrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) { @@ -60,7 +60,7 @@ vuint8mf2_t test_vssrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t 
test_vssrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) { @@ -69,7 +69,7 @@ vuint8m1_t test_vssrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) { @@ -78,7 +78,7 @@ vuint8m1_t test_vssrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) { @@ -87,7 +87,7 @@ vuint8m2_t test_vssrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) { @@ -96,7 +96,7 @@ vuint8m2_t test_vssrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) { @@ -105,7 +105,7 @@ vuint8m4_t test_vssrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) { @@ -114,7 +114,7 @@ vuint8m4_t test_vssrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) { @@ -123,7 +123,7 @@ vuint8m8_t test_vssrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv64i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv64i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) { @@ -132,7 +132,7 @@ vuint8m8_t test_vssrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, @@ -142,7 +142,7 @@ vuint16mf4_t test_vssrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, // CHECK-RV64-LABEL: @test_vssrl_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) { @@ -151,7 +151,7 @@ vuint16mf4_t test_vssrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, @@ -161,7 +161,7 @@ vuint16mf2_t test_vssrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, // CHECK-RV64-LABEL: @test_vssrl_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) { @@ -170,7 +170,7 @@ vuint16mf2_t test_vssrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) { @@ -179,7 +179,7 @@ vuint16m1_t test_vssrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) { @@ -188,7 +188,7 @@ vuint16m1_t test_vssrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: 
@test_vssrl_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) { @@ -197,7 +197,7 @@ vuint16m2_t test_vssrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) { @@ -206,7 +206,7 @@ vuint16m2_t test_vssrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) { @@ -215,7 +215,7 @@ vuint16m4_t test_vssrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) { @@ -224,7 +224,7 @@ vuint16m4_t test_vssrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) { @@ -233,7 +233,7 @@ vuint16m8_t test_vssrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) { @@ -242,7 +242,7 @@ vuint16m8_t test_vssrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, @@ -252,7 +252,7 @@ vuint32mf2_t test_vssrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, // CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) { @@ -261,7 +261,7 @@ vuint32mf2_t test_vssrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) { @@ -270,7 +270,7 @@ vuint32m1_t test_vssrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) { @@ -279,7 +279,7 @@ vuint32m1_t test_vssrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) { @@ -288,7 +288,7 @@ vuint32m2_t test_vssrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) { @@ -297,7 +297,7 @@ vuint32m2_t test_vssrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) { @@ -306,7 +306,7 @@ vuint32m4_t test_vssrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssrl.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) { @@ -315,7 +315,7 @@ vuint32m4_t test_vssrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) { @@ -324,7 +324,7 @@ vuint32m8_t test_vssrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) { @@ -333,7 +333,7 @@ vuint32m8_t test_vssrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) { @@ -342,7 +342,7 @@ vuint64m1_t test_vssrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) { @@ -351,7 +351,7 @@ vuint64m1_t test_vssrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) { @@ -360,7 +360,7 @@ vuint64m2_t test_vssrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t 
vl) { @@ -369,7 +369,7 @@ vuint64m2_t test_vssrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) { @@ -378,7 +378,7 @@ vuint64m4_t test_vssrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) { @@ -387,7 +387,7 @@ vuint64m4_t test_vssrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) { @@ -396,7 +396,7 @@ vuint64m8_t test_vssrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c index 04b91eb..e547e43 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vssub_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vssub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ vint8mf8_t test_vssub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i8mf4( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ vint8mf4_t test_vssub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ vint8mf4_t test_vssub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ vint8mf2_t test_vssub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vssub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vssub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vssub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vssub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, 
size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vssub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vssub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vssub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint8m8_t test_vssub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint8m8_t test_vssub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint16mf4_t test_vssub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { 
@@ -150,7 +150,7 @@ vint16mf4_t test_vssub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint16mf2_t test_vssub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint16mf2_t test_vssub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint16m1_t test_vssub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint16m1_t test_vssub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint16m2_t test_vssub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint16m2_t test_vssub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.nxv16i16.i64( undef, 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint16m4_t test_vssub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint16m4_t test_vssub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint16m8_t test_vssub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint16m8_t test_vssub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vssub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vssub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vssub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vssub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ vint32m2_t test_vssub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ vint32m2_t test_vssub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ vint32m4_t test_vssub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ vint32m4_t test_vssub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ vint32m8_t test_vssub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ vint32m8_t test_vssub_vx_i32m8(vint32m8_t op1, 
int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ vint64m1_t test_vssub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ vint64m1_t test_vssub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ vint64m2_t test_vssub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ vint64m2_t test_vssub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ vint64m4_t test_vssub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ vint64m4_t test_vssub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t 
test_vssub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ vint64m8_t test_vssub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ vint64m8_t test_vssub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ vuint8mf8_t test_vssubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ vuint8mf8_t test_vssubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ vuint8mf4_t test_vssubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ vuint8mf4_t test_vssubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ vuint8mf2_t test_vssubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ vuint8mf2_t test_vssubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ vuint8m1_t test_vssubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ vuint8m1_t test_vssubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ vuint8m2_t test_vssubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ vuint8m2_t test_vssubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ vuint8m4_t test_vssubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ vuint8m4_t test_vssubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ vuint8m8_t test_vssubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ vuint8m8_t test_vssubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, @@ -538,7 +538,7 @@ vuint16mf4_t test_vssubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vssubu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -547,7 +547,7 @@ vuint16mf4_t test_vssubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, @@ -557,7 +557,7 @@ vuint16mf2_t test_vssubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vssubu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -566,7 +566,7 @@ vuint16mf2_t test_vssubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ 
-575,7 +575,7 @@ vuint16m1_t test_vssubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -584,7 +584,7 @@ vuint16m1_t test_vssubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -593,7 +593,7 @@ vuint16m2_t test_vssubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -602,7 +602,7 @@ vuint16m2_t test_vssubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -611,7 +611,7 @@ vuint16m4_t test_vssubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -620,7 +620,7 @@ vuint16m4_t test_vssubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -629,7 +629,7 @@ vuint16m8_t test_vssubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -638,7 +638,7 @@ vuint16m8_t test_vssubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, @@ -648,7 +648,7 @@ vuint32mf2_t test_vssubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, // CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -657,7 +657,7 @@ vuint32mf2_t test_vssubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -666,7 +666,7 @@ vuint32m1_t test_vssubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -675,7 +675,7 @@ vuint32m1_t test_vssubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -684,7 +684,7 @@ vuint32m2_t test_vssubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -693,7 +693,7 @@ vuint32m2_t test_vssubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: 
@test_vssubu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -702,7 +702,7 @@ vuint32m4_t test_vssubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -711,7 +711,7 @@ vuint32m4_t test_vssubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -720,7 +720,7 @@ vuint32m8_t test_vssubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -729,7 +729,7 @@ vuint32m8_t test_vssubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -738,7 +738,7 @@ vuint64m1_t test_vssubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -747,7 +747,7 @@ vuint64m1_t test_vssubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint64m2_t test_vssubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -756,7 +756,7 @@ vuint64m2_t test_vssubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -765,7 +765,7 @@ vuint64m2_t test_vssubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -774,7 +774,7 @@ vuint64m4_t test_vssubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -783,7 +783,7 @@ vuint64m4_t test_vssubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -792,7 +792,7 @@ vuint64m8_t test_vssubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c index 720ddad..7cb6068 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t 
test_vsub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ vint8mf8_t test_vsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ vint8mf4_t test_vsub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ vint8mf4_t test_vsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ vint8mf2_t test_vsub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vsub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ 
-78,7 +78,7 @@ vint8m1_t test_vsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vsub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vsub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint8m8_t test_vsub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint8m8_t test_vsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t 
op2, size_t vl) { @@ -141,7 +141,7 @@ vint16mf4_t test_vsub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4_t test_vsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint16mf2_t test_vsub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint16mf2_t test_vsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint16m1_t test_vsub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint16m1_t test_vsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint16m2_t test_vsub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint16m2_t test_vsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint16m4_t test_vsub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint16m4_t test_vsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint16m8_t test_vsub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint16m8_t test_vsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vsub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.nxv2i32.i64( [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vsub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ vint32m2_t test_vsub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ vint32m2_t test_vsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ vint32m4_t test_vsub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ vint32m4_t test_vsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ vint32m8_t test_vsub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_i32m8( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ vint32m8_t test_vsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ vint64m1_t test_vsub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ vint64m1_t test_vsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ vint64m2_t test_vsub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ vint64m2_t test_vsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ vint64m4_t test_vsub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ vint64m4_t test_vsub_vx_i64m4(vint64m4_t 
op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ vint64m8_t test_vsub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ vint64m8_t test_vsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsub_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ vuint8mf8_t test_vsub_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ vuint8mf8_t test_vsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsub_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ vuint8mf4_t test_vsub_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ vuint8mf4_t test_vsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsub_vv_u8mf2(vuint8mf2_t op1, 
vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ vuint8mf2_t test_vsub_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ vuint8mf2_t test_vsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsub_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ vuint8m1_t test_vsub_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ vuint8m1_t test_vsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsub_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ vuint8m2_t test_vsub_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ vuint8m2_t test_vsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsub_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ vuint8m4_t test_vsub_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ vuint8m4_t test_vsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsub_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ vuint8m8_t test_vsub_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ vuint8m8_t test_vsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsub_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -537,7 +537,7 @@ vuint16mf4_t test_vsub_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vsub_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -546,7 +546,7 @@ vuint16mf4_t test_vsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsub_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -555,7 +555,7 @@ vuint16mf2_t test_vsub_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vsub_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -564,7 +564,7 @@ vuint16mf2_t test_vsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsub_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -573,7 +573,7 @@ vuint16m1_t test_vsub_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -582,7 +582,7 @@ vuint16m1_t test_vsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsub_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -591,7 +591,7 @@ vuint16m2_t test_vsub_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -600,7 +600,7 @@ vuint16m2_t test_vsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsub_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -609,7 +609,7 @@ vuint16m4_t test_vsub_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -618,7 +618,7 @@ vuint16m4_t test_vsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsub_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -627,7 +627,7 @@ vuint16m8_t test_vsub_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_u16m8( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -636,7 +636,7 @@ vuint16m8_t test_vsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsub_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -645,7 +645,7 @@ vuint32mf2_t test_vsub_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vsub_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -654,7 +654,7 @@ vuint32mf2_t test_vsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsub_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -663,7 +663,7 @@ vuint32m1_t test_vsub_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -672,7 +672,7 @@ vuint32m1_t test_vsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsub_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -681,7 +681,7 @@ vuint32m2_t test_vsub_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, 
size_t vl) { @@ -690,7 +690,7 @@ vuint32m2_t test_vsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsub_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -699,7 +699,7 @@ vuint32m4_t test_vsub_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -708,7 +708,7 @@ vuint32m4_t test_vsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsub_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -717,7 +717,7 @@ vuint32m8_t test_vsub_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -726,7 +726,7 @@ vuint32m8_t test_vsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsub_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -735,7 +735,7 @@ vuint64m1_t test_vsub_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -744,7 +744,7 @@ vuint64m1_t test_vsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.nxv2i64.i64( undef, 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsub_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -753,7 +753,7 @@ vuint64m2_t test_vsub_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -762,7 +762,7 @@ vuint64m2_t test_vsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsub_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -771,7 +771,7 @@ vuint64m4_t test_vsub_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -780,7 +780,7 @@ vuint64m4_t test_vsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsub_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -789,7 +789,7 @@ vuint64m8_t test_vsub_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c index 7d66d09..73d9fd6 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ 
-15,7 +15,7 @@ vint16mf4_t test_vwadd_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ vint16mf4_t test_vwadd_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i16.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) { @@ -33,7 +33,7 @@ vint16mf4_t test_vwadd_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ vint16mf4_t test_vwadd_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -51,7 +51,7 @@ vint16mf2_t test_vwadd_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ vint16mf2_t test_vwadd_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i16.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) { @@ -69,7 +69,7 @@ vint16mf2_t test_vwadd_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwadd.w.nxv2i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ vint16mf2_t test_vwadd_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -87,7 +87,7 @@ vint16m1_t test_vwadd_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint16m1_t test_vwadd_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i16.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) { @@ -105,7 +105,7 @@ vint16m1_t test_vwadd_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint16m1_t test_vwadd_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint16m2_t test_vwadd_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint16m2_t test_vwadd_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wv_i16m2( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i16.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint16m2_t test_vwadd_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint16m2_t test_vwadd_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwadd_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint16m4_t test_vwadd_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwadd_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint16m4_t test_vwadd_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i16.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwadd_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint16m4_t test_vwadd_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwadd_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint16m4_t test_vwadd_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwadd_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) 
{ @@ -195,7 +195,7 @@ vint16m8_t test_vwadd_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwadd_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint16m8_t test_vwadd_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv32i16.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwadd_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint16m8_t test_vwadd_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv32i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv32i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwadd_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint16m8_t test_vwadd_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwadd_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint32mf2_t test_vwadd_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwadd_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint32mf2_t test_vwadd_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i32.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwadd_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vwadd_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwadd_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vwadd_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwadd_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vwadd_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwadd_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vwadd_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i32.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwadd_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) { @@ -285,7 +285,7 @@ vint32m1_t test_vwadd_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwadd_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) { @@ -294,7 +294,7 @@ vint32m1_t test_vwadd_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwadd_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -303,7 +303,7 @@ vint32m2_t test_vwadd_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwadd_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) { @@ -312,7 +312,7 @@ vint32m2_t test_vwadd_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) { 
// CHECK-RV64-LABEL: @test_vwadd_wv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i32.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwadd_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) { @@ -321,7 +321,7 @@ vint32m2_t test_vwadd_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwadd_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) { @@ -330,7 +330,7 @@ vint32m2_t test_vwadd_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -339,7 +339,7 @@ vint32m4_t test_vwadd_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) { @@ -348,7 +348,7 @@ vint32m4_t test_vwadd_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i32.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ vint32m4_t test_vwadd_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) { @@ -366,7 +366,7 @@ vint32m4_t test_vwadd_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ vint32m8_t test_vwadd_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) { @@ -384,7 +384,7 @@ vint32m8_t test_vwadd_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i32.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) { @@ -393,7 +393,7 @@ vint32m8_t test_vwadd_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) { @@ -402,7 +402,7 @@ vint32m8_t test_vwadd_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -411,7 +411,7 @@ vint64m1_t test_vwadd_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -420,7 +420,7 @@ vint64m1_t test_vwadd_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) { @@ -429,7 +429,7 @@ vint64m1_t test_vwadd_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwadd.w.nxv1i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) { @@ -438,7 +438,7 @@ vint64m1_t test_vwadd_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -447,7 +447,7 @@ vint64m2_t test_vwadd_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) { @@ -456,7 +456,7 @@ vint64m2_t test_vwadd_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i64.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ vint64m2_t test_vwadd_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) { @@ -474,7 +474,7 @@ vint64m2_t test_vwadd_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ vint64m4_t test_vwadd_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) { @@ 
-492,7 +492,7 @@ vint64m4_t test_vwadd_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i64.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) { @@ -501,7 +501,7 @@ vint64m4_t test_vwadd_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) { @@ -510,7 +510,7 @@ vint64m4_t test_vwadd_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -519,7 +519,7 @@ vint64m8_t test_vwadd_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) { @@ -528,7 +528,7 @@ vint64m8_t test_vwadd_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i64.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) { @@ -537,7 +537,7 @@ vint64m8_t test_vwadd_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) { @@ -546,7 +546,7 @@ vint64m8_t test_vwadd_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, @@ -556,7 +556,7 @@ vuint16mf4_t test_vwaddu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -565,7 +565,7 @@ vuint16mf4_t test_vwaddu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2, @@ -575,7 +575,7 @@ vuint16mf4_t test_vwaddu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2, // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) { @@ -584,7 +584,7 @@ vuint16mf4_t test_vwaddu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, @@ -594,7 +594,7 @@ vuint16mf2_t test_vwaddu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -603,7 +603,7 @@ vuint16mf2_t test_vwaddu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2, @@ -613,7 +613,7 @@ vuint16mf2_t test_vwaddu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2, // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) { @@ -622,7 +622,7 @@ vuint16mf2_t test_vwaddu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -631,7 +631,7 @@ vuint16m1_t test_vwaddu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -640,7 +640,7 @@ vuint16m1_t test_vwaddu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { @@ -649,7 +649,7 @@ vuint16m1_t test_vwaddu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) { @@ -658,7 +658,7 @@ vuint16m1_t test_vwaddu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -667,7 +667,7 @@ vuint16m2_t test_vwaddu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -676,7 +676,7 @@ vuint16m2_t test_vwaddu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) { @@ -685,7 +685,7 @@ vuint16m2_t test_vwaddu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) { @@ -694,7 +694,7 @@ vuint16m2_t test_vwaddu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -703,7 +703,7 @@ vuint16m4_t test_vwaddu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -712,7 +712,7 @@ vuint16m4_t test_vwaddu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) { @@ -721,7 +721,7 @@ vuint16m4_t test_vwaddu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) { @@ -730,7 +730,7 @@ vuint16m4_t test_vwaddu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -739,7 +739,7 @@ vuint16m8_t test_vwaddu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -748,7 +748,7 @@ vuint16m8_t test_vwaddu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) { @@ -757,7 +757,7 @@ vuint16m8_t test_vwaddu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv32i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv32i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) { @@ -766,7 +766,7 @@ vuint16m8_t test_vwaddu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, @@ -776,7 +776,7 @@ vuint32mf2_t test_vwaddu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vwaddu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -785,7 +785,7 @@ vuint32mf2_t test_vwaddu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint32mf2_t test_vwaddu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, @@ -795,7 +795,7 @@ vuint32mf2_t test_vwaddu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vwaddu_wx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) { @@ -804,7 +804,7 @@ vuint32mf2_t test_vwaddu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, @@ -814,7 +814,7 @@ vuint32m1_t test_vwaddu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -823,7 +823,7 @@ vuint32m1_t test_vwaddu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { @@ -832,7 +832,7 @@ vuint32m1_t test_vwaddu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) { @@ -841,7 +841,7 @@ vuint32m1_t test_vwaddu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -850,7 +850,7 @@ vuint32m2_t test_vwaddu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -859,7 +859,7 @@ vuint32m2_t test_vwaddu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) { @@ -868,7 +868,7 @@ vuint32m2_t test_vwaddu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) { @@ -877,7 +877,7 @@ vuint32m2_t test_vwaddu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -886,7 +886,7 @@ vuint32m4_t test_vwaddu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -895,7 +895,7 @@ vuint32m4_t test_vwaddu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) { @@ -904,7 +904,7 @@ vuint32m4_t test_vwaddu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t 
test_vwaddu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) { @@ -913,7 +913,7 @@ vuint32m4_t test_vwaddu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -922,7 +922,7 @@ vuint32m8_t test_vwaddu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -931,7 +931,7 @@ vuint32m8_t test_vwaddu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) { @@ -940,7 +940,7 @@ vuint32m8_t test_vwaddu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) { @@ -949,7 +949,7 @@ vuint32m8_t test_vwaddu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, @@ -959,7 +959,7 @@ vuint64m1_t test_vwaddu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -968,7 +968,7 @@ vuint64m1_t test_vwaddu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { @@ -977,7 +977,7 @@ vuint64m1_t test_vwaddu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) { @@ -986,7 +986,7 @@ vuint64m1_t test_vwaddu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -995,7 +995,7 @@ vuint64m2_t test_vwaddu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -1004,7 +1004,7 @@ vuint64m2_t test_vwaddu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) { @@ -1013,7 +1013,7 @@ vuint64m2_t test_vwaddu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) { @@ -1022,7 +1022,7 @@ vuint64m2_t test_vwaddu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint64m4_t test_vwaddu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1031,7 +1031,7 @@ vuint64m4_t test_vwaddu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwaddu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -1040,7 +1040,7 @@ vuint64m4_t test_vwaddu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwaddu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) { @@ -1049,7 +1049,7 @@ vuint64m4_t test_vwaddu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwaddu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) { @@ -1058,7 +1058,7 @@ vuint64m4_t test_vwaddu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1067,7 +1067,7 @@ vuint64m8_t test_vwaddu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -1076,7 +1076,7 @@ vuint64m8_t test_vwaddu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) { @@ -1085,7 +1085,7 @@ vuint64m8_t test_vwaddu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m8( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_wx_u64m8(vuint64m8_t op1, uint32_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwcvt.c index 172ea75..f431ea5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwcvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwcvt.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwcvt_x_x_v_i16mf4 (vint8mf8_t src, size_t vl) { @@ -15,7 +15,7 @@ vint16mf4_t test_vwcvt_x_x_v_i16mf4 (vint8mf8_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwcvt_x_x_v_i16mf2 (vint8mf4_t src, size_t vl) { @@ -24,7 +24,7 @@ vint16mf2_t test_vwcvt_x_x_v_i16mf2 (vint8mf4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwcvt_x_x_v_i16m1 (vint8mf2_t src, size_t vl) { @@ -33,7 +33,7 @@ vint16m1_t test_vwcvt_x_x_v_i16m1 (vint8mf2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwcvt_x_x_v_i16m2 (vint8m1_t src, size_t vl) { @@ -42,7 +42,7 @@ vint16m2_t test_vwcvt_x_x_v_i16m2 (vint8m1_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwcvt_x_x_v_i16m4 (vint8m2_t src, size_t vl) { @@ -51,7 +51,7 @@ vint16m4_t test_vwcvt_x_x_v_i16m4 (vint8m2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t 
test_vwcvt_x_x_v_i16m8 (vint8m4_t src, size_t vl) { @@ -60,7 +60,7 @@ vint16m8_t test_vwcvt_x_x_v_i16m8 (vint8m4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwcvtu_x_x_v_u16mf4 (vuint8mf8_t src, size_t vl) { @@ -69,7 +69,7 @@ vuint16mf4_t test_vwcvtu_x_x_v_u16mf4 (vuint8mf8_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwcvtu_x_x_v_u16mf2 (vuint8mf4_t src, size_t vl) { @@ -78,7 +78,7 @@ vuint16mf2_t test_vwcvtu_x_x_v_u16mf2 (vuint8mf4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwcvtu_x_x_v_u16m1 (vuint8mf2_t src, size_t vl) { @@ -87,7 +87,7 @@ vuint16m1_t test_vwcvtu_x_x_v_u16m1 (vuint8mf2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwcvtu_x_x_v_u16m2 (vuint8m1_t src, size_t vl) { @@ -96,7 +96,7 @@ vuint16m2_t test_vwcvtu_x_x_v_u16m2 (vuint8m1_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwcvtu_x_x_v_u16m4 (vuint8m2_t src, size_t vl) { @@ -105,7 +105,7 @@ vuint16m4_t test_vwcvtu_x_x_v_u16m4 (vuint8m2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwcvtu_x_x_v_u16m8 (vuint8m4_t src, size_t vl) { @@ -114,7 +114,7 @@ vuint16m8_t test_vwcvtu_x_x_v_u16m8 (vuint8m4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t 
test_vwcvt_x_x_v_i32mf2 (vint16mf4_t src, size_t vl) { @@ -123,7 +123,7 @@ vint32mf2_t test_vwcvt_x_x_v_i32mf2 (vint16mf4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwcvt_x_x_v_i32m1 (vint16mf2_t src, size_t vl) { @@ -132,7 +132,7 @@ vint32m1_t test_vwcvt_x_x_v_i32m1 (vint16mf2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwcvt_x_x_v_i32m2 (vint16m1_t src, size_t vl) { @@ -141,7 +141,7 @@ vint32m2_t test_vwcvt_x_x_v_i32m2 (vint16m1_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwcvt_x_x_v_i32m4 (vint16m2_t src, size_t vl) { @@ -150,7 +150,7 @@ vint32m4_t test_vwcvt_x_x_v_i32m4 (vint16m2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwcvt_x_x_v_i32m8 (vint16m4_t src, size_t vl) { @@ -159,7 +159,7 @@ vint32m8_t test_vwcvt_x_x_v_i32m8 (vint16m4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwcvtu_x_x_v_u32mf2 (vuint16mf4_t src, size_t vl) { @@ -168,7 +168,7 @@ vuint32mf2_t test_vwcvtu_x_x_v_u32mf2 (vuint16mf4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwcvtu_x_x_v_u32m1 (vuint16mf2_t src, size_t vl) { @@ -177,7 +177,7 @@ vuint32m1_t test_vwcvtu_x_x_v_u32m1 (vuint16mf2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] 
// vuint32m2_t test_vwcvtu_x_x_v_u32m2 (vuint16m1_t src, size_t vl) { @@ -186,7 +186,7 @@ vuint32m2_t test_vwcvtu_x_x_v_u32m2 (vuint16m1_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwcvtu_x_x_v_u32m4 (vuint16m2_t src, size_t vl) { @@ -195,7 +195,7 @@ vuint32m4_t test_vwcvtu_x_x_v_u32m4 (vuint16m2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwcvtu_x_x_v_u32m8 (vuint16m4_t src, size_t vl) { @@ -204,7 +204,7 @@ vuint32m8_t test_vwcvtu_x_x_v_u32m8 (vuint16m4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64( [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwcvt_x_x_v_i64m1 (vint32mf2_t src, size_t vl) { @@ -213,7 +213,7 @@ vint64m1_t test_vwcvt_x_x_v_i64m1 (vint32mf2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64( [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwcvt_x_x_v_i64m2 (vint32m1_t src, size_t vl) { @@ -222,7 +222,7 @@ vint64m2_t test_vwcvt_x_x_v_i64m2 (vint32m1_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64( [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwcvt_x_x_v_i64m4 (vint32m2_t src, size_t vl) { @@ -231,7 +231,7 @@ vint64m4_t test_vwcvt_x_x_v_i64m4 (vint32m2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64( [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwcvt_x_x_v_i64m8 (vint32m4_t src, size_t vl) { @@ -240,7 +240,7 @@ vint64m8_t test_vwcvt_x_x_v_i64m8 (vint32m4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64( [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint64m1_t test_vwcvtu_x_x_v_u64m1 (vuint32mf2_t src, size_t vl) { @@ -249,7 +249,7 @@ vuint64m1_t test_vwcvtu_x_x_v_u64m1 (vuint32mf2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64( [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwcvtu_x_x_v_u64m2 (vuint32m1_t src, size_t vl) { @@ -258,7 +258,7 @@ vuint64m2_t test_vwcvtu_x_x_v_u64m2 (vuint32m1_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64( [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwcvtu_x_x_v_u64m4 (vuint32m2_t src, size_t vl) { @@ -267,7 +267,7 @@ vuint64m4_t test_vwcvtu_x_x_v_u64m4 (vuint32m2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64( [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwcvtu_x_x_v_u64m8 (vuint32m4_t src, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmul.c index 3e6eaaa..09e6e48 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmul.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmul_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ vint16mf4_t test_vwmul_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmul_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ vint16mf4_t test_vwmul_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmul_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ vint16mf2_t test_vwmul_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // 
CHECK-RV64-LABEL: @test_vwmul_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmul_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ vint16mf2_t test_vwmul_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmul_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ vint16m1_t test_vwmul_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmul_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ vint16m1_t test_vwmul_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmul_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ vint16m2_t test_vwmul_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmul_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ vint16m2_t test_vwmul_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmul_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ vint16m4_t test_vwmul_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmul_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint16m4_t test_vwmul_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmul_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ vint16m8_t test_vwmul_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmul_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint16m8_t test_vwmul_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmul_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint32mf2_t test_vwmul_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmul_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint32mf2_t test_vwmul_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmul_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint32m1_t test_vwmul_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmul_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint32m1_t test_vwmul_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmul_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint32m2_t test_vwmul_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmul_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint32m2_t test_vwmul_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmul_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint32m4_t test_vwmul_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmul_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint32m4_t test_vwmul_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmul_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint32m8_t test_vwmul_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmul_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint32m8_t test_vwmul_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t 
test_vwmul_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint64m1_t test_vwmul_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmul_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint64m1_t test_vwmul_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmul_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint64m2_t test_vwmul_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmul_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint64m2_t test_vwmul_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmul_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint64m4_t test_vwmul_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmul_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint64m4_t test_vwmul_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmul_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint64m8_t test_vwmul_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmul.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmul_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint64m8_t test_vwmul_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmulu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -285,7 +285,7 @@ vuint16mf4_t test_vwmulu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmulu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -294,7 +294,7 @@ vuint16mf4_t test_vwmulu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmulu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -303,7 +303,7 @@ vuint16mf2_t test_vwmulu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmulu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -312,7 +312,7 @@ vuint16mf2_t test_vwmulu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmulu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -321,7 +321,7 @@ vuint16m1_t test_vwmulu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint16m1_t test_vwmulu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -330,7 +330,7 @@ vuint16m1_t test_vwmulu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmulu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ vuint16m2_t test_vwmulu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmulu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -348,7 +348,7 @@ vuint16m2_t test_vwmulu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmulu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ vuint16m4_t test_vwmulu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmulu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -366,7 +366,7 @@ vuint16m4_t test_vwmulu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmulu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ vuint16m8_t test_vwmulu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmulu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -384,7 +384,7 @@ vuint16m8_t test_vwmulu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmulu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -393,7 +393,7 @@ vuint32mf2_t test_vwmulu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl // CHECK-RV64-LABEL: @test_vwmulu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmulu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -402,7 +402,7 @@ vuint32mf2_t test_vwmulu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmulu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -411,7 +411,7 @@ vuint32m1_t test_vwmulu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmulu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -420,7 +420,7 @@ vuint32m1_t test_vwmulu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmulu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -429,7 +429,7 @@ vuint32m2_t test_vwmulu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmulu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -438,7 +438,7 @@ vuint32m2_t test_vwmulu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmulu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -447,7 +447,7 @@ vuint32m4_t test_vwmulu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmulu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -456,7 +456,7 @@ vuint32m4_t test_vwmulu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmulu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -465,7 +465,7 @@ vuint32m8_t test_vwmulu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmulu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -474,7 +474,7 @@ vuint32m8_t test_vwmulu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmulu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -483,7 +483,7 @@ vuint64m1_t test_vwmulu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmulu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -492,7 +492,7 @@ vuint64m1_t test_vwmulu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmulu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -501,7 +501,7 @@ vuint64m2_t test_vwmulu_vv_u64m2(vuint32m1_t op1, 
vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmulu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -510,7 +510,7 @@ vuint64m2_t test_vwmulu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmulu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -519,7 +519,7 @@ vuint64m4_t test_vwmulu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmulu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -528,7 +528,7 @@ vuint64m4_t test_vwmulu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmulu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -537,7 +537,7 @@ vuint64m8_t test_vwmulu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmulu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -546,7 +546,7 @@ vuint64m8_t test_vwmulu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmulsu_vv_i16mf4(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -555,7 +555,7 @@ vint16mf4_t test_vwmulsu_vv_i16mf4(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmulsu_vx_i16mf4(vint8mf8_t op1, uint8_t op2, size_t vl) { @@ -564,7 +564,7 @@ vint16mf4_t test_vwmulsu_vx_i16mf4(vint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmulsu_vv_i16mf2(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -573,7 +573,7 @@ vint16mf2_t test_vwmulsu_vv_i16mf2(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmulsu_vx_i16mf2(vint8mf4_t op1, uint8_t op2, size_t vl) { @@ -582,7 +582,7 @@ vint16mf2_t test_vwmulsu_vx_i16mf2(vint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmulsu_vv_i16m1(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -591,7 +591,7 @@ vint16m1_t test_vwmulsu_vv_i16m1(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmulsu_vx_i16m1(vint8mf2_t op1, uint8_t op2, size_t vl) { @@ -600,7 +600,7 @@ vint16m1_t test_vwmulsu_vx_i16m1(vint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmulsu_vv_i16m2(vint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -609,7 +609,7 @@ vint16m2_t test_vwmulsu_vv_i16m2(vint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmulsu_vx_i16m2(vint8m1_t op1, uint8_t op2, size_t vl) { @@ -618,7 
+618,7 @@ vint16m2_t test_vwmulsu_vx_i16m2(vint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmulsu_vv_i16m4(vint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -627,7 +627,7 @@ vint16m4_t test_vwmulsu_vv_i16m4(vint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmulsu_vx_i16m4(vint8m2_t op1, uint8_t op2, size_t vl) { @@ -636,7 +636,7 @@ vint16m4_t test_vwmulsu_vx_i16m4(vint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmulsu_vv_i16m8(vint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -645,7 +645,7 @@ vint16m8_t test_vwmulsu_vv_i16m8(vint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmulsu_vx_i16m8(vint8m4_t op1, uint8_t op2, size_t vl) { @@ -654,7 +654,7 @@ vint16m8_t test_vwmulsu_vx_i16m8(vint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmulsu_vv_i32mf2(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -663,7 +663,7 @@ vint32mf2_t test_vwmulsu_vv_i32mf2(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmulsu_vx_i32mf2(vint16mf4_t op1, uint16_t op2, size_t vl) { @@ -672,7 +672,7 @@ vint32mf2_t test_vwmulsu_vx_i32mf2(vint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmulsu_vv_i32m1(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -681,7 +681,7 @@ vint32m1_t test_vwmulsu_vv_i32m1(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmulsu_vx_i32m1(vint16mf2_t op1, uint16_t op2, size_t vl) { @@ -690,7 +690,7 @@ vint32m1_t test_vwmulsu_vx_i32m1(vint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmulsu_vv_i32m2(vint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -699,7 +699,7 @@ vint32m2_t test_vwmulsu_vv_i32m2(vint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmulsu_vx_i32m2(vint16m1_t op1, uint16_t op2, size_t vl) { @@ -708,7 +708,7 @@ vint32m2_t test_vwmulsu_vx_i32m2(vint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmulsu_vv_i32m4(vint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -717,7 +717,7 @@ vint32m4_t test_vwmulsu_vv_i32m4(vint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmulsu_vx_i32m4(vint16m2_t op1, uint16_t op2, size_t vl) { @@ -726,7 +726,7 @@ vint32m4_t test_vwmulsu_vx_i32m4(vint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmulsu_vv_i32m8(vint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -735,7 +735,7 @@ vint32m8_t test_vwmulsu_vv_i32m8(vint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmulsu_vx_i32m8(vint16m4_t op1, uint16_t op2, size_t vl) { @@ -744,7 +744,7 @@ vint32m8_t test_vwmulsu_vx_i32m8(vint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmulsu_vv_i64m1(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -753,7 +753,7 @@ vint64m1_t test_vwmulsu_vv_i64m1(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmulsu_vx_i64m1(vint32mf2_t op1, uint32_t op2, size_t vl) { @@ -762,7 +762,7 @@ vint64m1_t test_vwmulsu_vx_i64m1(vint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmulsu_vv_i64m2(vint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -771,7 +771,7 @@ vint64m2_t test_vwmulsu_vv_i64m2(vint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmulsu_vx_i64m2(vint32m1_t op1, uint32_t op2, size_t vl) { @@ -780,7 +780,7 @@ vint64m2_t test_vwmulsu_vx_i64m2(vint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmulsu_vv_i64m4(vint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -789,7 +789,7 @@ vint64m4_t test_vwmulsu_vv_i64m4(vint32m2_t op1, vuint32m2_t op2, size_t 
vl) {

// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmulsu_vx_i64m4(vint32m2_t op1, uint32_t op2, size_t vl) {
@@ -798,7 +798,7 @@ vint64m4_t test_vwmulsu_vx_i64m4(vint32m2_t op1, uint32_t op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmulsu_vv_i64m8(vint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -807,7 +807,7 @@ vint64m8_t test_vwmulsu_vv_i64m8(vint32m4_t op1, vuint32m4_t op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmulsu_vx_i64m8(vint32m4_t op1, uint32_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c
index 9080a62..d355f29 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c
@@ -6,7 +6,7 @@

// CHECK-RV64-LABEL: @test_vwsub_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwsub_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint16mf4_t test_vwsub_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vwsub_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwsub_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint16mf4_t test_vwsub_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vwsub_wv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwsub_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint16mf4_t test_vwsub_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vwsub_wx_i16mf4(
//
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwsub_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ vint16mf4_t test_vwsub_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwsub_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -51,7 +51,7 @@ vint16mf2_t test_vwsub_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwsub_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ vint16mf2_t test_vwsub_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i16.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwsub_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) { @@ -69,7 +69,7 @@ vint16mf2_t test_vwsub_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwsub_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ vint16mf2_t test_vwsub_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -87,7 +87,7 @@ vint16m1_t test_vwsub_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t 
test_vwsub_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint16m1_t test_vwsub_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i16.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) { @@ -105,7 +105,7 @@ vint16m1_t test_vwsub_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint16m1_t test_vwsub_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint16m2_t test_vwsub_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint16m2_t test_vwsub_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i16.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint16m2_t test_vwsub_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint16m2_t test_vwsub_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint16m4_t test_vwsub_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint16m4_t test_vwsub_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i16.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint16m4_t test_vwsub_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint16m4_t test_vwsub_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint16m8_t test_vwsub_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint16m8_t test_vwsub_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv32i16.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint16m8_t test_vwsub_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wx_i16m8( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv32i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv32i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint16m8_t test_vwsub_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint32mf2_t test_vwsub_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint32mf2_t test_vwsub_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i32.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vwsub_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vwsub_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vwsub_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vwsub_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i32.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) { @@ -285,7 +285,7 @@ vint32m1_t test_vwsub_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) { @@ -294,7 +294,7 @@ vint32m1_t test_vwsub_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -303,7 +303,7 @@ vint32m2_t test_vwsub_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) { @@ -312,7 +312,7 @@ vint32m2_t test_vwsub_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i32.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) { @@ -321,7 +321,7 @@ vint32m2_t test_vwsub_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) { @@ -330,7 +330,7 @@ vint32m2_t test_vwsub_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -339,7 +339,7 @@ vint32m4_t test_vwsub_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) { @@ -348,7 +348,7 @@ vint32m4_t test_vwsub_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i32.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ vint32m4_t test_vwsub_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) { @@ -366,7 +366,7 @@ vint32m4_t test_vwsub_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ vint32m8_t test_vwsub_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) { @@ -384,7 +384,7 @@ vint32m8_t test_vwsub_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i32.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_wv_i32m8(vint32m8_t op1, vint16m4_t op2, 
size_t vl) { @@ -393,7 +393,7 @@ vint32m8_t test_vwsub_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) { @@ -402,7 +402,7 @@ vint32m8_t test_vwsub_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -411,7 +411,7 @@ vint64m1_t test_vwsub_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -420,7 +420,7 @@ vint64m1_t test_vwsub_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) { @@ -429,7 +429,7 @@ vint64m1_t test_vwsub_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) { @@ -438,7 +438,7 @@ vint64m1_t test_vwsub_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -447,7 +447,7 @@ vint64m2_t test_vwsub_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) { @@ -456,7 +456,7 @@ vint64m2_t test_vwsub_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i64.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ vint64m2_t test_vwsub_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) { @@ -474,7 +474,7 @@ vint64m2_t test_vwsub_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ vint64m4_t test_vwsub_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) { @@ -492,7 +492,7 @@ vint64m4_t test_vwsub_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i64.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) { @@ -501,7 +501,7 @@ vint64m4_t test_vwsub_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) { @@ -510,7 +510,7 @@ vint64m4_t test_vwsub_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) { // 
CHECK-RV64-LABEL: @test_vwsub_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -519,7 +519,7 @@ vint64m8_t test_vwsub_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) { @@ -528,7 +528,7 @@ vint64m8_t test_vwsub_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i64.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) { @@ -537,7 +537,7 @@ vint64m8_t test_vwsub_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) { @@ -546,7 +546,7 @@ vint64m8_t test_vwsub_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, @@ -556,7 +556,7 @@ vuint16mf4_t test_vwsubu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, // CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -565,7 +565,7 @@ vuint16mf4_t test_vwsubu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2, @@ -575,7 +575,7 @@ vuint16mf4_t test_vwsubu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2, // CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) { @@ -584,7 +584,7 @@ vuint16mf4_t test_vwsubu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, @@ -594,7 +594,7 @@ vuint16mf2_t test_vwsubu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, // CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -603,7 +603,7 @@ vuint16mf2_t test_vwsubu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2, @@ -613,7 +613,7 @@ vuint16mf2_t test_vwsubu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2, // CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) { @@ -622,7 +622,7 @@ vuint16mf2_t test_vwsubu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -631,7 +631,7 @@ vuint16m1_t test_vwsubu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -640,7 +640,7 @@ vuint16m1_t test_vwsubu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { @@ -649,7 +649,7 @@ vuint16m1_t test_vwsubu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) { @@ -658,7 +658,7 @@ vuint16m1_t test_vwsubu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -667,7 +667,7 @@ vuint16m2_t test_vwsubu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -676,7 +676,7 @@ vuint16m2_t test_vwsubu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) { @@ -685,7 +685,7 @@ vuint16m2_t test_vwsubu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_wx_u16m2(vuint16m2_t op1, uint8_t op2, 
size_t vl) { @@ -694,7 +694,7 @@ vuint16m2_t test_vwsubu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -703,7 +703,7 @@ vuint16m4_t test_vwsubu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -712,7 +712,7 @@ vuint16m4_t test_vwsubu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) { @@ -721,7 +721,7 @@ vuint16m4_t test_vwsubu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) { @@ -730,7 +730,7 @@ vuint16m4_t test_vwsubu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -739,7 +739,7 @@ vuint16m8_t test_vwsubu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -748,7 +748,7 @@ vuint16m8_t test_vwsubu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8.i64( [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) { @@ -757,7 +757,7 @@ vuint16m8_t test_vwsubu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv32i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv32i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) { @@ -766,7 +766,7 @@ vuint16m8_t test_vwsubu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, @@ -776,7 +776,7 @@ vuint32mf2_t test_vwsubu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vwsubu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -785,7 +785,7 @@ vuint32mf2_t test_vwsubu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, @@ -795,7 +795,7 @@ vuint32mf2_t test_vwsubu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vwsubu_wx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) { @@ -804,7 +804,7 @@ vuint32mf2_t test_vwsubu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, @@ -814,7 +814,7 @@ vuint32m1_t 
test_vwsubu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -823,7 +823,7 @@ vuint32m1_t test_vwsubu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { @@ -832,7 +832,7 @@ vuint32m1_t test_vwsubu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) { @@ -841,7 +841,7 @@ vuint32m1_t test_vwsubu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -850,7 +850,7 @@ vuint32m2_t test_vwsubu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -859,7 +859,7 @@ vuint32m2_t test_vwsubu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) { @@ -868,7 +868,7 @@ vuint32m2_t test_vwsubu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) { @@ -877,7 +877,7 @@ vuint32m2_t test_vwsubu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -886,7 +886,7 @@ vuint32m4_t test_vwsubu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -895,7 +895,7 @@ vuint32m4_t test_vwsubu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) { @@ -904,7 +904,7 @@ vuint32m4_t test_vwsubu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) { @@ -913,7 +913,7 @@ vuint32m4_t test_vwsubu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -922,7 +922,7 @@ vuint32m8_t test_vwsubu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -931,7 
+931,7 @@ vuint32m8_t test_vwsubu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) { @@ -940,7 +940,7 @@ vuint32m8_t test_vwsubu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) { @@ -949,7 +949,7 @@ vuint32m8_t test_vwsubu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, @@ -959,7 +959,7 @@ vuint64m1_t test_vwsubu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -968,7 +968,7 @@ vuint64m1_t test_vwsubu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { @@ -977,7 +977,7 @@ vuint64m1_t test_vwsubu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) { @@ -986,7 +986,7 @@ vuint64m1_t test_vwsubu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -995,7 +995,7 @@ vuint64m2_t test_vwsubu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -1004,7 +1004,7 @@ vuint64m2_t test_vwsubu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) { @@ -1013,7 +1013,7 @@ vuint64m2_t test_vwsubu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) { @@ -1022,7 +1022,7 @@ vuint64m2_t test_vwsubu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1031,7 +1031,7 @@ vuint64m4_t test_vwsubu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -1040,7 +1040,7 @@ vuint64m4_t test_vwsubu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) { @@ -1049,7 
+1049,7 @@ vuint64m4_t test_vwsubu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) { @@ -1058,7 +1058,7 @@ vuint64m4_t test_vwsubu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1067,7 +1067,7 @@ vuint64m8_t test_vwsubu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -1076,7 +1076,7 @@ vuint64m8_t test_vwsubu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) { @@ -1085,7 +1085,7 @@ vuint64m8_t test_vwsubu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_wx_u64m8(vuint64m8_t op1, uint32_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c index 52d0d07..d00be9a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vxor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vxor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // 
CHECK-RV64-LABEL: @test_vxor_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vxor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ vint8mf8_t test_vxor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vxor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ vint8mf4_t test_vxor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vxor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ vint8mf4_t test_vxor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vxor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ vint8mf2_t test_vxor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vxor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vxor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vxor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vxor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vxor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vxor_vx_i8m1(vint8m1_t op1, int8_t op2, 
size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vxor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vxor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vxor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vxor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vxor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vxor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vxor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vxor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vxor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint8m8_t test_vxor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vxor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint8m8_t test_vxor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vxor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint16mf4_t 
test_vxor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vxor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4_t test_vxor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vxor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint16mf2_t test_vxor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vxor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint16mf2_t test_vxor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vxor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint16m1_t test_vxor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vxor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint16m1_t test_vxor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vxor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint16m2_t test_vxor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret [[TMP0]] // vint16m2_t test_vxor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint16m2_t test_vxor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vxor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint16m4_t test_vxor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vxor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint16m4_t test_vxor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vxor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint16m8_t test_vxor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vxor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint16m8_t test_vxor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vxor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vxor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vxor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vxor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vxor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vxor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vxor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vxor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vxor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ vint32m2_t test_vxor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vxor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ vint32m2_t test_vxor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vxor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ vint32m4_t test_vxor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vxor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ vint32m4_t test_vxor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vxor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ vint32m8_t test_vxor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vxor.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vxor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ vint32m8_t test_vxor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vxor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ vint64m1_t test_vxor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vxor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ vint64m1_t test_vxor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vxor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ vint64m2_t test_vxor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vxor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ vint64m2_t test_vxor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vxor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ vint64m4_t test_vxor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vxor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ vint64m4_t test_vxor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // 
CHECK-RV64-LABEL: @test_vxor_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vxor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ vint64m8_t test_vxor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vxor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ vint64m8_t test_vxor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vxor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ vuint8mf8_t test_vxor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vxor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ vuint8mf8_t test_vxor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vxor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ vuint8mf4_t test_vxor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vxor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ vuint8mf4_t test_vxor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vxor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ 
-447,7 +447,7 @@ vuint8mf2_t test_vxor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vxor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ vuint8mf2_t test_vxor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vxor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ vuint8m1_t test_vxor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vxor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ vuint8m1_t test_vxor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vxor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ vuint8m2_t test_vxor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vxor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ vuint8m2_t test_vxor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vxor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ vuint8m4_t test_vxor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t 
test_vxor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ vuint8m4_t test_vxor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vxor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ vuint8m8_t test_vxor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vxor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ vuint8m8_t test_vxor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vxor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -537,7 +537,7 @@ vuint16mf4_t test_vxor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vxor_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vxor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -546,7 +546,7 @@ vuint16mf4_t test_vxor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vxor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -555,7 +555,7 @@ vuint16mf2_t test_vxor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vxor_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vxor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -564,7 +564,7 @@ vuint16mf2_t test_vxor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vxor.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vxor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -573,7 +573,7 @@ vuint16m1_t test_vxor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vxor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -582,7 +582,7 @@ vuint16m1_t test_vxor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vxor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -591,7 +591,7 @@ vuint16m2_t test_vxor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vxor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -600,7 +600,7 @@ vuint16m2_t test_vxor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vxor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -609,7 +609,7 @@ vuint16m4_t test_vxor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vxor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -618,7 +618,7 @@ vuint16m4_t test_vxor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vxor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -627,7 +627,7 @@ vuint16m8_t test_vxor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u16m8( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vxor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -636,7 +636,7 @@ vuint16m8_t test_vxor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vxor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -645,7 +645,7 @@ vuint32mf2_t test_vxor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vxor_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vxor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -654,7 +654,7 @@ vuint32mf2_t test_vxor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vxor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -663,7 +663,7 @@ vuint32m1_t test_vxor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vxor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -672,7 +672,7 @@ vuint32m1_t test_vxor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vxor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -681,7 +681,7 @@ vuint32m2_t test_vxor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vxor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -690,7 +690,7 @@ 
vuint32m2_t test_vxor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vxor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -699,7 +699,7 @@ vuint32m4_t test_vxor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vxor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -708,7 +708,7 @@ vuint32m4_t test_vxor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vxor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -717,7 +717,7 @@ vuint32m8_t test_vxor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vxor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -726,7 +726,7 @@ vuint32m8_t test_vxor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vxor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -735,7 +735,7 @@ vuint64m1_t test_vxor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vxor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -744,7 +744,7 @@ vuint64m1_t test_vxor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vxor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -753,7 +753,7 @@ vuint64m2_t test_vxor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vxor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -762,7 +762,7 @@ vuint64m2_t test_vxor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vxor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -771,7 +771,7 @@ vuint64m4_t test_vxor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vxor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -780,7 +780,7 @@ vuint64m4_t test_vxor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vxor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -789,7 +789,7 @@ vuint64m8_t test_vxor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vxor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vaadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vaadd.c index cf62812..77afacb 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vaadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vaadd.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vaadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vaadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // 
CHECK-RV64-LABEL: @test_vaadd_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vaadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ vint8mf8_t test_vaadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vaadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ vint8mf4_t test_vaadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vaadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ vint8mf4_t test_vaadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vaadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ vint8mf2_t test_vaadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vaadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vaadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vaadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vaadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vaadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t 
test_vaadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vaadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vaadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vaadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vaadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vaadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vaadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vaadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vaadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vaadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint8m8_t test_vaadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vaadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint8m8_t test_vaadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vaadd_vv_i16mf4(vint16mf4_t op1, 
vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint16mf4_t test_vaadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vaadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4_t test_vaadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vaadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint16mf2_t test_vaadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vaadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint16mf2_t test_vaadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vaadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint16m1_t test_vaadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vaadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint16m1_t test_vaadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vaadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint16m2_t test_vaadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vaadd.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vaadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint16m2_t test_vaadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vaadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint16m4_t test_vaadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vaadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint16m4_t test_vaadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vaadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint16m8_t test_vaadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vaadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint16m8_t test_vaadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vaadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vaadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vaadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vaadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i32m1( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vaadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vaadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vaadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vaadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vaadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ vint32m2_t test_vaadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vaadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ vint32m2_t test_vaadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vaadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ vint32m4_t test_vaadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vaadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ vint32m4_t test_vaadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vaadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ vint32m8_t 
test_vaadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vaadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ vint32m8_t test_vaadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vaadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ vint64m1_t test_vaadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vaadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ vint64m1_t test_vaadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vaadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ vint64m2_t test_vaadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vaadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ vint64m2_t test_vaadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vaadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ vint64m4_t test_vaadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vaadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ vint64m4_t test_vaadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vaadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ vint64m8_t test_vaadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaadd_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vaadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ vint64m8_t test_vaadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vaaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ vuint8mf8_t test_vaaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vaaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ vuint8mf8_t test_vaaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vaaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ vuint8mf4_t test_vaaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vaaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ vuint8mf4_t test_vaaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vaaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ vuint8mf2_t test_vaaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vaaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ vuint8mf2_t test_vaaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vaaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ vuint8m1_t test_vaaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vaaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ vuint8m1_t test_vaaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vaaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ vuint8m2_t test_vaaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vaaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ vuint8m2_t test_vaaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vaaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ vuint8m4_t test_vaaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m4( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vaaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ vuint8m4_t test_vaaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vaaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ vuint8m8_t test_vaaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vaaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ vuint8m8_t test_vaaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vaaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, @@ -538,7 +538,7 @@ vuint16mf4_t test_vaaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vaaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -547,7 +547,7 @@ vuint16mf4_t test_vaaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vaaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, @@ -557,7 +557,7 @@ vuint16mf2_t test_vaaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vaaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t 
vl) { @@ -566,7 +566,7 @@ vuint16mf2_t test_vaaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vaaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -575,7 +575,7 @@ vuint16m1_t test_vaaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vaaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -584,7 +584,7 @@ vuint16m1_t test_vaaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vaaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -593,7 +593,7 @@ vuint16m2_t test_vaaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vaaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -602,7 +602,7 @@ vuint16m2_t test_vaaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vaaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -611,7 +611,7 @@ vuint16m4_t test_vaaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vaaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -620,7 +620,7 @@ vuint16m4_t test_vaaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vaaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -629,7 +629,7 @@ vuint16m8_t test_vaaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vaaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -638,7 +638,7 @@ vuint16m8_t test_vaaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vaaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, @@ -648,7 +648,7 @@ vuint32mf2_t test_vaaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, // CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vaaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -657,7 +657,7 @@ vuint32mf2_t test_vaaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vaaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -666,7 +666,7 @@ vuint32m1_t test_vaaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vaaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -675,7 +675,7 @@ vuint32m1_t test_vaaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vaaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -684,7 +684,7 @@ vuint32m2_t test_vaaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // 
CHECK-RV64-LABEL: @test_vaaddu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vaaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -693,7 +693,7 @@ vuint32m2_t test_vaaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vaaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -702,7 +702,7 @@ vuint32m4_t test_vaaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vaaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -711,7 +711,7 @@ vuint32m4_t test_vaaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vaaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -720,7 +720,7 @@ vuint32m8_t test_vaaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vaaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -729,7 +729,7 @@ vuint32m8_t test_vaaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vaaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -738,7 +738,7 @@ vuint64m1_t test_vaaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vaaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -747,7 +747,7 @@ vuint64m1_t test_vaaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vaaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -756,7 +756,7 @@ vuint64m2_t test_vaaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vaaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -765,7 +765,7 @@ vuint64m2_t test_vaaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vaaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -774,7 +774,7 @@ vuint64m4_t test_vaaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vaaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -783,7 +783,7 @@ vuint64m4_t test_vaaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vaaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -792,7 +792,7 @@ vuint64m8_t test_vaaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vaaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c index f8070ce..bcfbcb0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -16,7 +16,7 @@ vint8mf8_t test_vadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -25,7 +25,7 @@ vint8mf8_t test_vadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -34,7 +34,7 @@ vint8mf4_t test_vadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -43,7 +43,7 @@ vint8mf4_t test_vadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -52,7 +52,7 @@ vint8mf2_t test_vadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -61,7 +61,7 @@ vint8mf2_t test_vadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, 
size_t vl) { @@ -70,7 +70,7 @@ vint8m1_t test_vadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -79,7 +79,7 @@ vint8m1_t test_vadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -88,7 +88,7 @@ vint8m2_t test_vadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -97,7 +97,7 @@ vint8m2_t test_vadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -106,7 +106,7 @@ vint8m4_t test_vadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -115,7 +115,7 @@ vint8m4_t test_vadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -124,7 +124,7 @@ vint8m8_t test_vadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vadd_vx_i8m8(vint8m8_t op1, int8_t 
op2, size_t vl) { @@ -133,7 +133,7 @@ vint8m8_t test_vadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -142,7 +142,7 @@ vint16mf4_t test_vadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -151,7 +151,7 @@ vint16mf4_t test_vadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -160,7 +160,7 @@ vint16mf2_t test_vadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -169,7 +169,7 @@ vint16mf2_t test_vadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -178,7 +178,7 @@ vint16m1_t test_vadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -187,7 +187,7 @@ vint16m1_t test_vadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -196,7 +196,7 @@ vint16m2_t test_vadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -205,7 +205,7 @@ vint16m2_t test_vadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -214,7 +214,7 @@ vint16m4_t test_vadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -223,7 +223,7 @@ vint16m4_t test_vadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -232,7 +232,7 @@ vint16m8_t test_vadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -241,7 +241,7 @@ vint16m8_t test_vadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -250,7 +250,7 @@ vint32mf2_t test_vadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i64( [[OP1:%.*]], i32 
[[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -259,7 +259,7 @@ vint32mf2_t test_vadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -268,7 +268,7 @@ vint32m1_t test_vadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -277,7 +277,7 @@ vint32m1_t test_vadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -286,7 +286,7 @@ vint32m2_t test_vadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -295,7 +295,7 @@ vint32m2_t test_vadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -304,7 +304,7 @@ vint32m4_t test_vadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -313,7 +313,7 @@ vint32m4_t test_vadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i32m8( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -322,7 +322,7 @@ vint32m8_t test_vadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -331,7 +331,7 @@ vint32m8_t test_vadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -340,7 +340,7 @@ vint64m1_t test_vadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -349,7 +349,7 @@ vint64m1_t test_vadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -358,7 +358,7 @@ vint64m2_t test_vadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -367,7 +367,7 @@ vint64m2_t test_vadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -376,7 +376,7 @@ vint64m4_t 
test_vadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -385,7 +385,7 @@ vint64m4_t test_vadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -394,7 +394,7 @@ vint64m8_t test_vadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -403,7 +403,7 @@ vint64m8_t test_vadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vadd_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -412,7 +412,7 @@ vuint8mf8_t test_vadd_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vadd_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -421,7 +421,7 @@ vuint8mf8_t test_vadd_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vadd_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -430,7 +430,7 @@ vuint8mf4_t test_vadd_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t 
test_vadd_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -439,7 +439,7 @@ vuint8mf4_t test_vadd_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vadd_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -448,7 +448,7 @@ vuint8mf2_t test_vadd_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vadd_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -457,7 +457,7 @@ vuint8mf2_t test_vadd_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vadd_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -466,7 +466,7 @@ vuint8m1_t test_vadd_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vadd_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -475,7 +475,7 @@ vuint8m1_t test_vadd_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vadd_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -484,7 +484,7 @@ vuint8m2_t test_vadd_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vadd_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -493,7 +493,7 @@ vuint8m2_t test_vadd_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vadd_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -502,7 +502,7 @@ vuint8m4_t test_vadd_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vadd_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -511,7 +511,7 @@ vuint8m4_t test_vadd_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vadd_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -520,7 +520,7 @@ vuint8m8_t test_vadd_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vadd_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -529,7 +529,7 @@ vuint8m8_t test_vadd_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vadd_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -538,7 +538,7 @@ vuint16mf4_t test_vadd_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vadd_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vadd_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -547,7 +547,7 @@ vuint16mf4_t test_vadd_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vadd_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -556,7 +556,7 @@ vuint16mf2_t test_vadd_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vadd_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i64( [[OP1:%.*]], i16 
[[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vadd_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -565,7 +565,7 @@ vuint16mf2_t test_vadd_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vadd_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -574,7 +574,7 @@ vuint16m1_t test_vadd_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vadd_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -583,7 +583,7 @@ vuint16m1_t test_vadd_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vadd_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -592,7 +592,7 @@ vuint16m2_t test_vadd_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vadd_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -601,7 +601,7 @@ vuint16m2_t test_vadd_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vadd_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -610,7 +610,7 @@ vuint16m4_t test_vadd_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vadd_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -619,7 +619,7 @@ vuint16m4_t test_vadd_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // 
CHECK-RV64-LABEL: @test_vadd_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vadd_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -628,7 +628,7 @@ vuint16m8_t test_vadd_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vadd_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -637,7 +637,7 @@ vuint16m8_t test_vadd_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vadd_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -646,7 +646,7 @@ vuint32mf2_t test_vadd_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vadd_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vadd_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -655,7 +655,7 @@ vuint32mf2_t test_vadd_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vadd_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -664,7 +664,7 @@ vuint32m1_t test_vadd_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vadd_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -673,7 +673,7 @@ vuint32m1_t test_vadd_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t 
test_vadd_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -682,7 +682,7 @@ vuint32m2_t test_vadd_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vadd_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -691,7 +691,7 @@ vuint32m2_t test_vadd_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vadd_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -700,7 +700,7 @@ vuint32m4_t test_vadd_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vadd_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -709,7 +709,7 @@ vuint32m4_t test_vadd_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vadd_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -718,7 +718,7 @@ vuint32m8_t test_vadd_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vadd_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -727,7 +727,7 @@ vuint32m8_t test_vadd_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vadd_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -736,7 +736,7 @@ vuint64m1_t test_vadd_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vadd_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m1_t test_vadd_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -745,7 +745,7 @@ vuint64m1_t test_vadd_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_vadd_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -754,7 +754,7 @@ vuint64m2_t test_vadd_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_vadd_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -763,7 +763,7 @@ vuint64m2_t test_vadd_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vadd_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -772,7 +772,7 @@ vuint64m4_t test_vadd_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vadd_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -781,7 +781,7 @@ vuint64m4_t test_vadd_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vadd_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -790,7 +790,7 @@ vuint64m8_t test_vadd_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vadd_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vand.c
index 54f63cd..26b8e9a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vand.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vand.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vand_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vand_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vand_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vand_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vand_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vand_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vand_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vand_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vand_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vand_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vand_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vand_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vand_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t
test_vand_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vand_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vand_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vand_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vand_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vand_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vand_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vand_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vand_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vand_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vand_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vand_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vand_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint8m8_t test_vand_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint8m8_t test_vand_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint8m8_t test_vand_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vand_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint16mf4_t test_vand_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vand_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4_t test_vand_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vand_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint16mf2_t test_vand_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vand_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint16mf2_t test_vand_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vand_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint16m1_t test_vand_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vand_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint16m1_t test_vand_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vand.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vand_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint16m2_t test_vand_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vand_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint16m2_t test_vand_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vand_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint16m4_t test_vand_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vand_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint16m4_t test_vand_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vand_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint16m8_t test_vand_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vand_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint16m8_t test_vand_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vand_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vand_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vand.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vand_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vand_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vand_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vand_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vand_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vand_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vand_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ vint32m2_t test_vand_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vand_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ vint32m2_t test_vand_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vand_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ vint32m4_t test_vand_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vand_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ vint32m4_t test_vand_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // 
CHECK-RV64-LABEL: @test_vand_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vand_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ vint32m8_t test_vand_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vand_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ vint32m8_t test_vand_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vand_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ vint64m1_t test_vand_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vand_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ vint64m1_t test_vand_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vand_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ vint64m2_t test_vand_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vand_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ vint64m2_t test_vand_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vand_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t 
vl) { @@ -375,7 +375,7 @@ vint64m4_t test_vand_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vand_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ vint64m4_t test_vand_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vand_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ vint64m8_t test_vand_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vand_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ vint64m8_t test_vand_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vand_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ vuint8mf8_t test_vand_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vand_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ vuint8mf8_t test_vand_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vand_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ vuint8mf4_t test_vand_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vand_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ vuint8mf4_t test_vand_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vand_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ vuint8mf2_t test_vand_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vand_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ vuint8mf2_t test_vand_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vand_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ vuint8m1_t test_vand_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vand_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ vuint8m1_t test_vand_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vand_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ vuint8m2_t test_vand_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vand_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ vuint8m2_t test_vand_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vand.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vand_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ vuint8m4_t test_vand_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vand_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ vuint8m4_t test_vand_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vand_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ vuint8m8_t test_vand_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vand_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ vuint8m8_t test_vand_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vand_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -537,7 +537,7 @@ vuint16mf4_t test_vand_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vand_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vand_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -546,7 +546,7 @@ vuint16mf4_t test_vand_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vand_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -555,7 +555,7 @@ vuint16mf2_t test_vand_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vand_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vand.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vand_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -564,7 +564,7 @@ vuint16mf2_t test_vand_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vand_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -573,7 +573,7 @@ vuint16m1_t test_vand_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vand_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -582,7 +582,7 @@ vuint16m1_t test_vand_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vand_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -591,7 +591,7 @@ vuint16m2_t test_vand_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vand_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -600,7 +600,7 @@ vuint16m2_t test_vand_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vand_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -609,7 +609,7 @@ vuint16m4_t test_vand_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vand_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -618,7 +618,7 @@ vuint16m4_t 
test_vand_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vand_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -627,7 +627,7 @@ vuint16m8_t test_vand_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vand_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -636,7 +636,7 @@ vuint16m8_t test_vand_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vand_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -645,7 +645,7 @@ vuint32mf2_t test_vand_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vand_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vand_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -654,7 +654,7 @@ vuint32mf2_t test_vand_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vand_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -663,7 +663,7 @@ vuint32m1_t test_vand_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vand_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -672,7 +672,7 @@ vuint32m1_t test_vand_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vand_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -681,7 +681,7 @@ vuint32m2_t test_vand_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vand_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2_t test_vand_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vand_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -699,7 +699,7 @@ vuint32m4_t test_vand_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vand_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -708,7 +708,7 @@ vuint32m4_t test_vand_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vand_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -717,7 +717,7 @@ vuint32m8_t test_vand_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vand_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -726,7 +726,7 @@ vuint32m8_t test_vand_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vand_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -735,7 +735,7 @@ vuint64m1_t test_vand_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vand_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.i64.i64( [[OP1:%.*]], i64 
[[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vand_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -744,7 +744,7 @@ vuint64m1_t test_vand_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_u64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m2_t test_vand_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -753,7 +753,7 @@ vuint64m2_t test_vand_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_u64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m2_t test_vand_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -762,7 +762,7 @@ vuint64m2_t test_vand_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_u64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m4_t test_vand_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vand_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_u64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m4_t test_vand_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vand_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_u64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vand_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vand_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_u64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vand_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vasub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vasub.c
index 311881e..ac70115 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vasub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vasub.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vasub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vasub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vasub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vasub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vasub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vasub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vasub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vasub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vasub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vasub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vasub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vasub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]],
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vasub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vasub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vasub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vasub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vasub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vasub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vasub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vasub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vasub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vasub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vasub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vasub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vasub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint8m8_t test_vasub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vasub.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vasub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint8m8_t test_vasub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vasub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint16mf4_t test_vasub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vasub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4_t test_vasub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vasub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint16mf2_t test_vasub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vasub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint16mf2_t test_vasub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vasub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint16m1_t test_vasub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vasub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint16m1_t test_vasub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i16m2( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vasub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint16m2_t test_vasub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vasub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint16m2_t test_vasub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vasub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint16m4_t test_vasub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vasub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint16m4_t test_vasub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vasub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint16m8_t test_vasub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vasub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint16m8_t test_vasub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vasub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ 
vint32mf2_t test_vasub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vasub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vasub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vasub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vasub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vasub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vasub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vasub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ vint32m2_t test_vasub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vasub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ vint32m2_t test_vasub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vasub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ vint32m4_t test_vasub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vasub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ vint32m4_t test_vasub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vasub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ vint32m8_t test_vasub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vasub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ vint32m8_t test_vasub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vasub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ vint64m1_t test_vasub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vasub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ vint64m1_t test_vasub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vasub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ vint64m2_t test_vasub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vasub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ vint64m2_t test_vasub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vasub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ vint64m4_t test_vasub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vasub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ vint64m4_t test_vasub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vasub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ vint64m8_t test_vasub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasub_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vasub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ vint64m8_t test_vasub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vasubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ vuint8mf8_t test_vasubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vasubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ vuint8mf8_t test_vasubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vasubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ vuint8mf4_t test_vasubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: 
@test_vasubu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vasubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ vuint8mf4_t test_vasubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vasubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ vuint8mf2_t test_vasubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vasubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ vuint8mf2_t test_vasubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vasubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ vuint8m1_t test_vasubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vasubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ vuint8m1_t test_vasubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vasubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ vuint8m2_t test_vasubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vasubu_vx_u8m2(vuint8m2_t op1, uint8_t 
op2, size_t vl) { @@ -492,7 +492,7 @@ vuint8m2_t test_vasubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vasubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ vuint8m4_t test_vasubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vasubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ vuint8m4_t test_vasubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vasubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ vuint8m8_t test_vasubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vasubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ vuint8m8_t test_vasubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vasubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, @@ -538,7 +538,7 @@ vuint16mf4_t test_vasubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vasubu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vasubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -547,7 +547,7 @@ vuint16mf4_t test_vasubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i16.nxv2i16.i64( 
undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vasubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, @@ -557,7 +557,7 @@ vuint16mf2_t test_vasubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vasubu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vasubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -566,7 +566,7 @@ vuint16mf2_t test_vasubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vasubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -575,7 +575,7 @@ vuint16m1_t test_vasubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vasubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -584,7 +584,7 @@ vuint16m1_t test_vasubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vasubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -593,7 +593,7 @@ vuint16m2_t test_vasubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vasubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -602,7 +602,7 @@ vuint16m2_t test_vasubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vasubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -611,7 +611,7 @@ vuint16m4_t test_vasubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vasubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -620,7 +620,7 @@ vuint16m4_t test_vasubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vasubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -629,7 +629,7 @@ vuint16m8_t test_vasubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vasubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -638,7 +638,7 @@ vuint16m8_t test_vasubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vasubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, @@ -648,7 +648,7 @@ vuint32mf2_t test_vasubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, // CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vasubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -657,7 +657,7 @@ vuint32mf2_t test_vasubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vasubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -666,7 +666,7 @@ vuint32m1_t test_vasubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vasubu_vx_u32m1(vuint32m1_t op1, 
uint32_t op2, size_t vl) { @@ -675,7 +675,7 @@ vuint32m1_t test_vasubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vasubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -684,7 +684,7 @@ vuint32m2_t test_vasubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vasubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -693,7 +693,7 @@ vuint32m2_t test_vasubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vasubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -702,7 +702,7 @@ vuint32m4_t test_vasubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vasubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -711,7 +711,7 @@ vuint32m4_t test_vasubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vasubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -720,7 +720,7 @@ vuint32m8_t test_vasubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vasubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -729,7 +729,7 @@ vuint32m8_t test_vasubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vasubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -738,7 +738,7 @@ vuint64m1_t test_vasubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vasubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -747,7 +747,7 @@ vuint64m1_t test_vasubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vasubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -756,7 +756,7 @@ vuint64m2_t test_vasubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vasubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -765,7 +765,7 @@ vuint64m2_t test_vasubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vasubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -774,7 +774,7 @@ vuint64m4_t test_vasubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vasubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -783,7 +783,7 @@ vuint64m4_t test_vasubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vasubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -792,7 +792,7 @@ vuint64m8_t test_vasubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, 
size_t vl) { // CHECK-RV64-LABEL: @test_vasubu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vasubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vdiv.c index 7d18db0..3451d36 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vdiv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vdiv.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vdiv_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vdiv_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vdiv_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ vint8mf8_t test_vdiv_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vdiv_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ vint8mf4_t test_vdiv_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vdiv_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ vint8mf4_t test_vdiv_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vdiv_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ vint8mf2_t test_vdiv_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i8.i8.i64( undef, 
[[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vdiv_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vdiv_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vdiv_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vdiv_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vdiv_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vdiv_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vdiv_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vdiv_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vdiv_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vdiv_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vdiv_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vdiv_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vdiv_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vdiv_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv64i8.nxv64i8.i64( 
undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vdiv_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint8m8_t test_vdiv_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vdiv_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint8m8_t test_vdiv_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vdiv_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint16mf4_t test_vdiv_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vdiv_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4_t test_vdiv_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vdiv_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint16mf2_t test_vdiv_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vdiv_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint16mf2_t test_vdiv_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vdiv_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint16m1_t test_vdiv_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i16.i16.i64( [[OP1:%.*]], i16 
[[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vdiv_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint16m1_t test_vdiv_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vdiv_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint16m2_t test_vdiv_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vdiv_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint16m2_t test_vdiv_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vdiv_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint16m4_t test_vdiv_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vdiv_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint16m4_t test_vdiv_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vdiv_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint16m8_t test_vdiv_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vdiv_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint16m8_t test_vdiv_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vdiv_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vdiv_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vdiv_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vdiv_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vdiv_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vdiv_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vdiv_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vdiv_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vdiv_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ vint32m2_t test_vdiv_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vdiv_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ vint32m2_t test_vdiv_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vdiv_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ 
vint32m4_t test_vdiv_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vdiv_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ vint32m4_t test_vdiv_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vdiv_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ vint32m8_t test_vdiv_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vdiv_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ vint32m8_t test_vdiv_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vdiv_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ vint64m1_t test_vdiv_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vdiv_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ vint64m1_t test_vdiv_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vdiv_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ vint64m2_t test_vdiv_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint64m2_t test_vdiv_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ vint64m2_t test_vdiv_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vdiv_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ vint64m4_t test_vdiv_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vdiv_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ vint64m4_t test_vdiv_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vdiv_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ vint64m8_t test_vdiv_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdiv_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vdiv_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ vint64m8_t test_vdiv_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vdivu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ vuint8mf8_t test_vdivu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vdivu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ vuint8mf8_t test_vdivu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vdivu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vdivu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ vuint8mf4_t test_vdivu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vdivu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ vuint8mf4_t test_vdivu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vdivu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ vuint8mf2_t test_vdivu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vdivu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ vuint8mf2_t test_vdivu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vdivu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ vuint8m1_t test_vdivu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vdivu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ vuint8m1_t test_vdivu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vdivu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ vuint8m2_t test_vdivu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vdivu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vdivu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ vuint8m2_t test_vdivu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vdivu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ vuint8m4_t test_vdivu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vdivu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ vuint8m4_t test_vdivu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vdivu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ vuint8m8_t test_vdivu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vdivu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ vuint8m8_t test_vdivu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vdivu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -537,7 +537,7 @@ vuint16mf4_t test_vdivu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vdivu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vdivu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -546,7 +546,7 @@ vuint16mf4_t test_vdivu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, 
size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vdivu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -555,7 +555,7 @@ vuint16mf2_t test_vdivu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vdivu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vdivu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -564,7 +564,7 @@ vuint16mf2_t test_vdivu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vdivu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -573,7 +573,7 @@ vuint16m1_t test_vdivu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vdivu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -582,7 +582,7 @@ vuint16m1_t test_vdivu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vdivu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -591,7 +591,7 @@ vuint16m2_t test_vdivu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vdivu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -600,7 +600,7 @@ vuint16m2_t test_vdivu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vdivu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -609,7 +609,7 @@ vuint16m4_t test_vdivu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vdivu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -618,7 +618,7 @@ vuint16m4_t test_vdivu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vdivu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -627,7 +627,7 @@ vuint16m8_t test_vdivu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vdivu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -636,7 +636,7 @@ vuint16m8_t test_vdivu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vdivu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -645,7 +645,7 @@ vuint32mf2_t test_vdivu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vdivu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -654,7 +654,7 @@ vuint32mf2_t test_vdivu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vdivu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -663,7 +663,7 @@ vuint32m1_t test_vdivu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vdivu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vdivu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -672,7 +672,7 @@ vuint32m1_t test_vdivu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vdivu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -681,7 +681,7 @@ vuint32m2_t test_vdivu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vdivu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2_t test_vdivu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vdivu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -699,7 +699,7 @@ vuint32m4_t test_vdivu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vdivu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -708,7 +708,7 @@ vuint32m4_t test_vdivu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vdivu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -717,7 +717,7 @@ vuint32m8_t test_vdivu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vdivu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -726,7 +726,7 @@ vuint32m8_t 
test_vdivu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vdivu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -735,7 +735,7 @@ vuint64m1_t test_vdivu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vdivu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -744,7 +744,7 @@ vuint64m1_t test_vdivu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vdivu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -753,7 +753,7 @@ vuint64m2_t test_vdivu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vdivu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -762,7 +762,7 @@ vuint64m2_t test_vdivu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vdivu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -771,7 +771,7 @@ vuint64m4_t test_vdivu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vdivu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -780,7 +780,7 @@ vuint64m4_t test_vdivu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vdivu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -789,7 +789,7 @@ vuint64m8_t test_vdivu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vdivu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vdivu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfabs.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfabs.c index 18495dc..94a75de 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfabs.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfabs.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vfabs_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfabs_v_f32mf2 (vfloat32mf2_t op1, size_t vl) { @@ -17,7 +17,7 @@ vfloat32mf2_t test_vfabs_v_f32mf2 (vfloat32mf2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vfabs_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfabs_v_f32m1 (vfloat32m1_t op1, size_t vl) { @@ -26,7 +26,7 @@ vfloat32m1_t test_vfabs_v_f32m1 (vfloat32m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vfabs_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfabs_v_f32m2 (vfloat32m2_t op1, size_t vl) { @@ -35,7 +35,7 @@ vfloat32m2_t test_vfabs_v_f32m2 (vfloat32m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vfabs_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfabs_v_f32m4 (vfloat32m4_t op1, size_t vl) { @@ -44,7 +44,7 @@ vfloat32m4_t test_vfabs_v_f32m4 (vfloat32m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vfabs_v_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfabs_v_f32m8 (vfloat32m8_t op1, size_t vl) { @@ -53,7 +53,7 @@ vfloat32m8_t test_vfabs_v_f32m8 (vfloat32m8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vfabs_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfabs_v_f64m1 (vfloat64m1_t op1, size_t vl) { @@ -62,7 +62,7 @@ vfloat64m1_t test_vfabs_v_f64m1 (vfloat64m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vfabs_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfabs_v_f64m2 (vfloat64m2_t op1, size_t vl) { @@ -71,7 +71,7 @@ vfloat64m2_t test_vfabs_v_f64m2 (vfloat64m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vfabs_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfabs_v_f64m4 (vfloat64m4_t op1, size_t vl) { @@ -80,7 +80,7 @@ vfloat64m4_t test_vfabs_v_f64m4 (vfloat64m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vfabs_v_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfabs_v_f64m8 (vfloat64m8_t op1, size_t vl) { @@ -170,7 +170,7 @@ vfloat64m8_t test_vfabs_v_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat // CHECK-RV64-LABEL: @test_vfabs_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfabs_v_f16mf4 (vfloat16mf4_t op1, size_t vl) { @@ -179,7 +179,7 @@ vfloat16mf4_t test_vfabs_v_f16mf4 (vfloat16mf4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vfabs_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfabs_v_f16mf2 (vfloat16mf2_t op1, size_t vl) { @@ -188,7 +188,7 @@ vfloat16mf2_t test_vfabs_v_f16mf2 (vfloat16mf2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vfabs_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfabs_v_f16m1 (vfloat16m1_t op1, size_t vl) { @@ -197,7 +197,7 @@ vfloat16m1_t test_vfabs_v_f16m1 (vfloat16m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vfabs_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsgnjx.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfabs_v_f16m2 (vfloat16m2_t op1, size_t vl) { @@ -206,7 +206,7 @@ vfloat16m2_t test_vfabs_v_f16m2 (vfloat16m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vfabs_v_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfabs_v_f16m4 (vfloat16m4_t op1, size_t vl) { @@ -215,7 +215,7 @@ vfloat16m4_t test_vfabs_v_f16m4 (vfloat16m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vfabs_v_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv32f16.nxv32f16.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfabs_v_f16m8 (vfloat16m8_t op1, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c index 4757cd9..e67009a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vv_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { @@ -17,7 +17,7 @@ vfloat16mf4_t test_vfadd_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t // CHECK-RV64-LABEL: @test_vfadd_vf_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -26,7 +26,7 @@ vfloat16mf4_t test_vfadd_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) // CHECK-RV64-LABEL: @test_vfadd_vv_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { @@ -35,7 +35,7 @@ vfloat16mf2_t test_vfadd_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t // CHECK-RV64-LABEL: @test_vfadd_vf_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat16mf2_t test_vfadd_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -44,7 +44,7 @@ vfloat16mf2_t test_vfadd_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) // CHECK-RV64-LABEL: @test_vfadd_vv_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -53,7 +53,7 @@ vfloat16m1_t test_vfadd_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfadd_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -62,7 +62,7 @@ vfloat16m1_t test_vfadd_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfadd_vv_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -71,7 +71,7 @@ vfloat16m2_t test_vfadd_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfadd_vf_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -80,7 +80,7 @@ vfloat16m2_t test_vfadd_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfadd_vv_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { @@ -89,7 +89,7 @@ vfloat16m4_t test_vfadd_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfadd_vf_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -98,7 +98,7 @@ vfloat16m4_t test_vfadd_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfadd_vv_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfadd.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { @@ -107,7 +107,7 @@ vfloat16m8_t test_vfadd_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfadd_vf_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { @@ -116,7 +116,7 @@ vfloat16m8_t test_vfadd_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vv_f32mf2 (vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { @@ -125,7 +125,7 @@ vfloat32mf2_t test_vfadd_vv_f32mf2 (vfloat32mf2_t op1, vfloat32mf2_t op2, size_t // CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vf_f32mf2 (vfloat32mf2_t op1, float op2, size_t vl) { @@ -134,7 +134,7 @@ vfloat32mf2_t test_vfadd_vf_f32mf2 (vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfadd_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vv_f32m1 (vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { @@ -143,7 +143,7 @@ vfloat32m1_t test_vfadd_vv_f32m1 (vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfadd_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vf_f32m1 (vfloat32m1_t op1, float op2, size_t vl) { @@ -152,7 +152,7 @@ vfloat32m1_t test_vfadd_vf_f32m1 (vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfadd_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vv_f32m2 (vfloat32m2_t op1, vfloat32m2_t 
op2, size_t vl) { @@ -161,7 +161,7 @@ vfloat32m2_t test_vfadd_vv_f32m2 (vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfadd_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vf_f32m2 (vfloat32m2_t op1, float op2, size_t vl) { @@ -170,7 +170,7 @@ vfloat32m2_t test_vfadd_vf_f32m2 (vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfadd_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vv_f32m4 (vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { @@ -179,7 +179,7 @@ vfloat32m4_t test_vfadd_vv_f32m4 (vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfadd_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vf_f32m4 (vfloat32m4_t op1, float op2, size_t vl) { @@ -188,7 +188,7 @@ vfloat32m4_t test_vfadd_vf_f32m4 (vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfadd_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vv_f32m8 (vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { @@ -197,7 +197,7 @@ vfloat32m8_t test_vfadd_vv_f32m8 (vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfadd_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vf_f32m8 (vfloat32m8_t op1, float op2, size_t vl) { @@ -206,7 +206,7 @@ vfloat32m8_t test_vfadd_vf_f32m8 (vfloat32m8_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfadd_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vv_f64m1 (vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { @@ -215,7 +215,7 @@ vfloat64m1_t test_vfadd_vv_f64m1 (vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfadd_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vf_f64m1 (vfloat64m1_t op1, double op2, size_t vl) { @@ -224,7 +224,7 @@ vfloat64m1_t test_vfadd_vf_f64m1 (vfloat64m1_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfadd_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vv_f64m2 (vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { @@ -233,7 +233,7 @@ vfloat64m2_t test_vfadd_vv_f64m2 (vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfadd_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vf_f64m2 (vfloat64m2_t op1, double op2, size_t vl) { @@ -242,7 +242,7 @@ vfloat64m2_t test_vfadd_vf_f64m2 (vfloat64m2_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfadd_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vv_f64m4 (vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { @@ -251,7 +251,7 @@ vfloat64m4_t test_vfadd_vv_f64m4 (vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfadd_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vf_f64m4 (vfloat64m4_t op1, double op2, size_t vl) { @@ -260,7 +260,7 @@ vfloat64m4_t test_vfadd_vf_f64m4 (vfloat64m4_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfadd_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vv_f64m8 (vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { @@ -269,7 +269,7 @@ vfloat64m8_t test_vfadd_vv_f64m8 (vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfadd_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vf_f64m8 (vfloat64m8_t op1, double op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfdiv.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfdiv.c index c2c5b12..c7b2527 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfdiv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfdiv.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfdiv_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -18,7 +18,7 @@ vfloat32mf2_t test_vfdiv_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, // CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -27,7 +27,7 @@ vfloat32mf2_t test_vfdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfdiv_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, @@ -37,7 +37,7 @@ vfloat32m1_t test_vfdiv_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -46,7 +46,7 @@ vfloat32m1_t test_vfdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfdiv_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, @@ -56,7 +56,7 @@ vfloat32m2_t test_vfdiv_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -65,7 +65,7 @@ vfloat32m2_t test_vfdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfdiv_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, @@ -75,7 +75,7 @@ vfloat32m4_t test_vfdiv_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -84,7 +84,7 @@ vfloat32m4_t test_vfdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfdiv_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, @@ -94,7 +94,7 @@ vfloat32m8_t test_vfdiv_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -103,7 +103,7 @@ vfloat32m8_t test_vfdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfdiv_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, @@ -113,7 +113,7 @@ vfloat64m1_t test_vfdiv_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -122,7 +122,7 @@ vfloat64m1_t test_vfdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfdiv_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, @@ -132,7 +132,7 @@ vfloat64m2_t test_vfdiv_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -141,7 +141,7 @@ vfloat64m2_t test_vfdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfdiv_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, @@ -151,7 +151,7 @@ vfloat64m4_t test_vfdiv_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -160,7 +160,7 @@ vfloat64m4_t test_vfdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfdiv_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, @@ -170,7 +170,7 @@ vfloat64m8_t test_vfdiv_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfdiv_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { @@ -368,7 +368,7 @@ vfloat64m8_t test_vfdiv_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, // CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfdiv_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { @@ -377,7 +377,7 @@ vfloat16mf4_t test_vfdiv_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t // CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfdiv_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -386,7 +386,7 @@ 
vfloat16mf4_t test_vfdiv_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) // CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfdiv_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { @@ -395,7 +395,7 @@ vfloat16mf2_t test_vfdiv_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t // CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfdiv_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -404,7 +404,7 @@ vfloat16mf2_t test_vfdiv_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) // CHECK-RV64-LABEL: @test_vfdiv_vv_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfdiv_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -413,7 +413,7 @@ vfloat16m1_t test_vfdiv_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfdiv_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfdiv_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -422,7 +422,7 @@ vfloat16m1_t test_vfdiv_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfdiv_vv_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfdiv_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -431,7 +431,7 @@ vfloat16m2_t test_vfdiv_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfdiv_vf_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfdiv_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -440,7 +440,7 @@ vfloat16m2_t test_vfdiv_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfdiv_vv_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfdiv.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfdiv_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { @@ -449,7 +449,7 @@ vfloat16m4_t test_vfdiv_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfdiv_vf_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfdiv_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -458,7 +458,7 @@ vfloat16m4_t test_vfdiv_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfdiv_vv_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv32f16.nxv32f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfdiv_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { @@ -467,7 +467,7 @@ vfloat16m8_t test_vfdiv_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfdiv_vf_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv32f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfdiv_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmax.c index 052c02f..299d4cb 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmax.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmax.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmax_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -18,7 +18,7 @@ vfloat32mf2_t test_vfmax_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, // CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmax_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -27,7 +27,7 @@ vfloat32mf2_t test_vfmax_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmax_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t 
test_vfmax_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, @@ -37,7 +37,7 @@ vfloat32m1_t test_vfmax_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, // CHECK-RV64-LABEL: @test_vfmax_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmax_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -46,7 +46,7 @@ vfloat32m1_t test_vfmax_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmax_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmax_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, @@ -56,7 +56,7 @@ vfloat32m2_t test_vfmax_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, // CHECK-RV64-LABEL: @test_vfmax_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmax_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -65,7 +65,7 @@ vfloat32m2_t test_vfmax_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmax_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmax_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, @@ -75,7 +75,7 @@ vfloat32m4_t test_vfmax_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, // CHECK-RV64-LABEL: @test_vfmax_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmax_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -84,7 +84,7 @@ vfloat32m4_t test_vfmax_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmax_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmax_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, @@ -94,7 +94,7 @@ vfloat32m8_t test_vfmax_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, // CHECK-RV64-LABEL: @test_vfmax_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16f32.f32.i64( undef, 
[[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmax_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -103,7 +103,7 @@ vfloat32m8_t test_vfmax_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmax_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmax_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, @@ -113,7 +113,7 @@ vfloat64m1_t test_vfmax_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, // CHECK-RV64-LABEL: @test_vfmax_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmax_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -122,7 +122,7 @@ vfloat64m1_t test_vfmax_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmax_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmax_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, @@ -132,7 +132,7 @@ vfloat64m2_t test_vfmax_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, // CHECK-RV64-LABEL: @test_vfmax_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmax_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -141,7 +141,7 @@ vfloat64m2_t test_vfmax_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmax_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmax_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, @@ -151,7 +151,7 @@ vfloat64m4_t test_vfmax_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, // CHECK-RV64-LABEL: @test_vfmax_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmax_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -160,7 +160,7 @@ vfloat64m4_t test_vfmax_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmax_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f64.nxv8f64.i64( 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmax_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, @@ -170,7 +170,7 @@ vfloat64m8_t test_vfmax_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, // CHECK-RV64-LABEL: @test_vfmax_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmax_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { @@ -368,7 +368,7 @@ vfloat64m8_t test_vfmax_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, // CHECK-RV64-LABEL: @test_vfmax_vv_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmax_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { @@ -377,7 +377,7 @@ vfloat16mf4_t test_vfmax_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t // CHECK-RV64-LABEL: @test_vfmax_vf_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmax_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -386,7 +386,7 @@ vfloat16mf4_t test_vfmax_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) // CHECK-RV64-LABEL: @test_vfmax_vv_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmax_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { @@ -395,7 +395,7 @@ vfloat16mf2_t test_vfmax_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t // CHECK-RV64-LABEL: @test_vfmax_vf_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmax_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -404,7 +404,7 @@ vfloat16mf2_t test_vfmax_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) // CHECK-RV64-LABEL: @test_vfmax_vv_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmax_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -413,7 +413,7 @@ vfloat16m1_t 
test_vfmax_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfmax_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmax_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -422,7 +422,7 @@ vfloat16m1_t test_vfmax_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmax_vv_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmax_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -431,7 +431,7 @@ vfloat16m2_t test_vfmax_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfmax_vf_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmax_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -440,7 +440,7 @@ vfloat16m2_t test_vfmax_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmax_vv_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmax_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { @@ -449,7 +449,7 @@ vfloat16m4_t test_vfmax_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfmax_vf_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmax_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -458,7 +458,7 @@ vfloat16m4_t test_vfmax_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmax_vv_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv32f16.nxv32f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmax_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { @@ -467,7 +467,7 @@ vfloat16m8_t test_vfmax_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfmax_vf_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmax.nxv32f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmax_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmin.c index 29c2b4c..3ef7f0a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmin.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmin.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmin_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -18,7 +18,7 @@ vfloat32mf2_t test_vfmin_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, // CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmin_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -27,7 +27,7 @@ vfloat32mf2_t test_vfmin_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmin_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmin_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, @@ -37,7 +37,7 @@ vfloat32m1_t test_vfmin_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, // CHECK-RV64-LABEL: @test_vfmin_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmin_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -46,7 +46,7 @@ vfloat32m1_t test_vfmin_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmin_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmin_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, @@ -56,7 +56,7 @@ vfloat32m2_t test_vfmin_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, // CHECK-RV64-LABEL: @test_vfmin_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmin_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -65,7 +65,7 @@ vfloat32m2_t 
test_vfmin_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmin_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmin_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, @@ -75,7 +75,7 @@ vfloat32m4_t test_vfmin_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, // CHECK-RV64-LABEL: @test_vfmin_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmin_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -84,7 +84,7 @@ vfloat32m4_t test_vfmin_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmin_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmin_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, @@ -94,7 +94,7 @@ vfloat32m8_t test_vfmin_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, // CHECK-RV64-LABEL: @test_vfmin_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmin_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -103,7 +103,7 @@ vfloat32m8_t test_vfmin_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmin_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmin_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, @@ -113,7 +113,7 @@ vfloat64m1_t test_vfmin_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, // CHECK-RV64-LABEL: @test_vfmin_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmin_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -122,7 +122,7 @@ vfloat64m1_t test_vfmin_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmin_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat64m2_t test_vfmin_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, @@ -132,7 +132,7 @@ vfloat64m2_t test_vfmin_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, // CHECK-RV64-LABEL: @test_vfmin_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmin_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -141,7 +141,7 @@ vfloat64m2_t test_vfmin_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmin_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmin_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, @@ -151,7 +151,7 @@ vfloat64m4_t test_vfmin_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, // CHECK-RV64-LABEL: @test_vfmin_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmin_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -160,7 +160,7 @@ vfloat64m4_t test_vfmin_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmin_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmin_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, @@ -170,7 +170,7 @@ vfloat64m8_t test_vfmin_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, // CHECK-RV64-LABEL: @test_vfmin_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmin_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { @@ -368,7 +368,7 @@ vfloat64m8_t test_vfmin_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, // CHECK-RV64-LABEL: @test_vfmin_vv_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmin_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { @@ -377,7 +377,7 @@ vfloat16mf4_t test_vfmin_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t // CHECK-RV64-LABEL: @test_vfmin_vf_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmin_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -386,7 +386,7 @@ vfloat16mf4_t test_vfmin_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) // CHECK-RV64-LABEL: @test_vfmin_vv_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmin_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { @@ -395,7 +395,7 @@ vfloat16mf2_t test_vfmin_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t // CHECK-RV64-LABEL: @test_vfmin_vf_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmin_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -404,7 +404,7 @@ vfloat16mf2_t test_vfmin_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) // CHECK-RV64-LABEL: @test_vfmin_vv_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmin_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -413,7 +413,7 @@ vfloat16m1_t test_vfmin_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfmin_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmin_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -422,7 +422,7 @@ vfloat16m1_t test_vfmin_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmin_vv_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmin_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -431,7 +431,7 @@ vfloat16m2_t test_vfmin_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfmin_vf_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmin_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -440,7 +440,7 @@ vfloat16m2_t test_vfmin_vf_f16m2 
(vfloat16m2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmin_vv_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmin_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { @@ -449,7 +449,7 @@ vfloat16m4_t test_vfmin_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfmin_vf_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmin_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -458,7 +458,7 @@ vfloat16m4_t test_vfmin_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmin_vv_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv32f16.nxv32f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmin_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { @@ -467,7 +467,7 @@ vfloat16m8_t test_vfmin_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfmin_vf_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv32f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmin_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmul.c index 0be5376..562eb61 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmul.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmul_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -18,7 +18,7 @@ vfloat32mf2_t test_vfmul_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, // CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmul_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -27,7 +27,7 @@ vfloat32mf2_t test_vfmul_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmul_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmul.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmul_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, @@ -37,7 +37,7 @@ vfloat32m1_t test_vfmul_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, // CHECK-RV64-LABEL: @test_vfmul_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmul_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -46,7 +46,7 @@ vfloat32m1_t test_vfmul_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmul_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmul_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, @@ -56,7 +56,7 @@ vfloat32m2_t test_vfmul_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, // CHECK-RV64-LABEL: @test_vfmul_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmul_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -65,7 +65,7 @@ vfloat32m2_t test_vfmul_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmul_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmul_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, @@ -75,7 +75,7 @@ vfloat32m4_t test_vfmul_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, // CHECK-RV64-LABEL: @test_vfmul_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmul_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -84,7 +84,7 @@ vfloat32m4_t test_vfmul_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmul_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmul_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, @@ -94,7 +94,7 @@ vfloat32m8_t test_vfmul_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, // CHECK-RV64-LABEL: 
@test_vfmul_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmul_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -103,7 +103,7 @@ vfloat32m8_t test_vfmul_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmul_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmul_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, @@ -113,7 +113,7 @@ vfloat64m1_t test_vfmul_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, // CHECK-RV64-LABEL: @test_vfmul_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmul_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -122,7 +122,7 @@ vfloat64m1_t test_vfmul_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmul_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmul_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, @@ -132,7 +132,7 @@ vfloat64m2_t test_vfmul_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, // CHECK-RV64-LABEL: @test_vfmul_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmul_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -141,7 +141,7 @@ vfloat64m2_t test_vfmul_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmul_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmul_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, @@ -151,7 +151,7 @@ vfloat64m4_t test_vfmul_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, // CHECK-RV64-LABEL: @test_vfmul_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmul_vf_f64m4(vfloat64m4_t op1, double op2, 
size_t vl) { @@ -160,7 +160,7 @@ vfloat64m4_t test_vfmul_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmul_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmul_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, @@ -170,7 +170,7 @@ vfloat64m8_t test_vfmul_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, // CHECK-RV64-LABEL: @test_vfmul_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmul_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { @@ -368,7 +368,7 @@ vfloat64m8_t test_vfmul_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, // CHECK-RV64-LABEL: @test_vfmul_vv_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmul_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { @@ -377,7 +377,7 @@ vfloat16mf4_t test_vfmul_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t // CHECK-RV64-LABEL: @test_vfmul_vf_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmul_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -386,7 +386,7 @@ vfloat16mf4_t test_vfmul_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) // CHECK-RV64-LABEL: @test_vfmul_vv_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmul_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { @@ -395,7 +395,7 @@ vfloat16mf2_t test_vfmul_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t // CHECK-RV64-LABEL: @test_vfmul_vf_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmul_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -404,7 +404,7 @@ vfloat16mf2_t test_vfmul_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) // CHECK-RV64-LABEL: @test_vfmul_vv_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmul_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -413,7 +413,7 @@ vfloat16m1_t test_vfmul_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfmul_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmul_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -422,7 +422,7 @@ vfloat16m1_t test_vfmul_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmul_vv_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmul_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -431,7 +431,7 @@ vfloat16m2_t test_vfmul_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfmul_vf_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmul_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -440,7 +440,7 @@ vfloat16m2_t test_vfmul_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmul_vv_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmul_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { @@ -449,7 +449,7 @@ vfloat16m4_t test_vfmul_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfmul_vf_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmul_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -458,7 +458,7 @@ vfloat16m4_t test_vfmul_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfmul_vv_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv32f16.nxv32f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmul_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { @@ -467,7 +467,7 @@ vfloat16m8_t test_vfmul_vv_f16m8 (vfloat16m8_t op1, 
vfloat16m8_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfmul_vf_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv32f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmul_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfneg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfneg.c index 50dc403..64ed627 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfneg.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfneg.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vfneg_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfneg_v_f32mf2 (vfloat32mf2_t op1, size_t vl) { @@ -17,7 +17,7 @@ vfloat32mf2_t test_vfneg_v_f32mf2 (vfloat32mf2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vfneg_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfneg_v_f32m1 (vfloat32m1_t op1, size_t vl) { @@ -26,7 +26,7 @@ vfloat32m1_t test_vfneg_v_f32m1 (vfloat32m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vfneg_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfneg_v_f32m2 (vfloat32m2_t op1, size_t vl) { @@ -35,7 +35,7 @@ vfloat32m2_t test_vfneg_v_f32m2 (vfloat32m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vfneg_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfneg_v_f32m4 (vfloat32m4_t op1, size_t vl) { @@ -44,7 +44,7 @@ vfloat32m4_t test_vfneg_v_f32m4 (vfloat32m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vfneg_v_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfneg_v_f32m8 (vfloat32m8_t op1, size_t vl) { @@ -53,7 +53,7 @@ vfloat32m8_t test_vfneg_v_f32m8 (vfloat32m8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vfneg_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfneg_v_f64m1 (vfloat64m1_t op1, size_t vl) { @@ -62,7 +62,7 @@ vfloat64m1_t test_vfneg_v_f64m1 (vfloat64m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vfneg_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfneg_v_f64m2 (vfloat64m2_t op1, size_t vl) { @@ -71,7 +71,7 @@ vfloat64m2_t test_vfneg_v_f64m2 (vfloat64m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vfneg_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfneg_v_f64m4 (vfloat64m4_t op1, size_t vl) { @@ -80,7 +80,7 @@ vfloat64m4_t test_vfneg_v_f64m4 (vfloat64m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vfneg_v_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfneg_v_f64m8 (vfloat64m8_t op1, size_t vl) { @@ -170,7 +170,7 @@ vfloat64m8_t test_vfneg_v_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat // CHECK-RV64-LABEL: @test_vfneg_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfneg_v_f16mf4 (vfloat16mf4_t op1, size_t vl) { @@ -179,7 +179,7 @@ vfloat16mf4_t test_vfneg_v_f16mf4 (vfloat16mf4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vfneg_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfneg_v_f16mf2 (vfloat16mf2_t op1, size_t vl) { @@ -188,7 +188,7 @@ vfloat16mf2_t test_vfneg_v_f16mf2 (vfloat16mf2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vfneg_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfneg_v_f16m1 (vfloat16m1_t op1, size_t vl) { @@ -197,7 +197,7 @@ vfloat16m1_t test_vfneg_v_f16m1 (vfloat16m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vfneg_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] 
// vfloat16m2_t test_vfneg_v_f16m2 (vfloat16m2_t op1, size_t vl) { @@ -206,7 +206,7 @@ vfloat16m2_t test_vfneg_v_f16m2 (vfloat16m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vfneg_v_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfneg_v_f16m4 (vfloat16m4_t op1, size_t vl) { @@ -215,7 +215,7 @@ vfloat16m4_t test_vfneg_v_f16m4 (vfloat16m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vfneg_v_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv32f16.nxv32f16.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfneg_v_f16m8 (vfloat16m8_t op1, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrdiv.c index c201f27c..92c9bce 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrdiv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrdiv.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfrdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -17,7 +17,7 @@ vfloat32mf2_t test_vfrdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfrdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -26,7 +26,7 @@ vfloat32m1_t test_vfrdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfrdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -35,7 +35,7 @@ vfloat32m2_t test_vfrdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfrdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -44,7 +44,7 @@ vfloat32m4_t test_vfrdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m8( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfrdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -53,7 +53,7 @@ vfloat32m8_t test_vfrdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfrdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -62,7 +62,7 @@ vfloat64m1_t test_vfrdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfrdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -71,7 +71,7 @@ vfloat64m2_t test_vfrdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfrdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -80,7 +80,7 @@ vfloat64m4_t test_vfrdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfrdiv_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { @@ -179,7 +179,7 @@ vfloat64m8_t test_vfrdiv_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfrdiv_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -188,7 +188,7 @@ vfloat16mf4_t test_vfrdiv_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t 
test_vfrdiv_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -197,7 +197,7 @@ vfloat16mf2_t test_vfrdiv_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfrdiv_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -206,7 +206,7 @@ vfloat16m1_t test_vfrdiv_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfrdiv_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -215,7 +215,7 @@ vfloat16m2_t test_vfrdiv_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfrdiv_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -224,7 +224,7 @@ vfloat16m4_t test_vfrdiv_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv32f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfrdiv_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsub.c index 7428f43..12974a8 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsub.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfrsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -17,7 +17,7 @@ vfloat32mf2_t test_vfrsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfrsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -26,7 +26,7 @@ vfloat32m1_t 
test_vfrsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfrsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -35,7 +35,7 @@ vfloat32m2_t test_vfrsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfrsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -44,7 +44,7 @@ vfloat32m4_t test_vfrsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfrsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -53,7 +53,7 @@ vfloat32m8_t test_vfrsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfrsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -62,7 +62,7 @@ vfloat64m1_t test_vfrsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfrsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -71,7 +71,7 @@ vfloat64m2_t test_vfrsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfrsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -80,7 +80,7 @@ vfloat64m4_t test_vfrsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv8f64.f64.i64( 
undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfrsub_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { @@ -179,7 +179,7 @@ vfloat64m8_t test_vfrsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, // CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfrsub_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -188,7 +188,7 @@ vfloat16mf4_t test_vfrsub_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) // CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfrsub_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -197,7 +197,7 @@ vfloat16mf2_t test_vfrsub_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) // CHECK-RV64-LABEL: @test_vfrsub_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfrsub_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -206,7 +206,7 @@ vfloat16m1_t test_vfrsub_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrsub_vf_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfrsub_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -215,7 +215,7 @@ vfloat16m2_t test_vfrsub_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrsub_vf_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfrsub_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -224,7 +224,7 @@ vfloat16m4_t test_vfrsub_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfrsub_vf_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv32f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfrsub_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsgnj.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsgnj.c index 
277b803..ab48549 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsgnj.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsgnj.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnj_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -18,7 +18,7 @@ vfloat32mf2_t test_vfsgnj_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnj_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -27,7 +27,7 @@ vfloat32mf2_t test_vfsgnj_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnj_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, @@ -37,7 +37,7 @@ vfloat32m1_t test_vfsgnj_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnj_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -46,7 +46,7 @@ vfloat32m1_t test_vfsgnj_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnj_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, @@ -56,7 +56,7 @@ vfloat32m2_t test_vfsgnj_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnj_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -65,7 +65,7 @@ vfloat32m2_t test_vfsgnj_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfsgnj.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnj_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, @@ -75,7 +75,7 @@ vfloat32m4_t test_vfsgnj_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnj_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -84,7 +84,7 @@ vfloat32m4_t test_vfsgnj_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnj_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, @@ -94,7 +94,7 @@ vfloat32m8_t test_vfsgnj_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnj_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -103,7 +103,7 @@ vfloat32m8_t test_vfsgnj_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnj_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, @@ -113,7 +113,7 @@ vfloat64m1_t test_vfsgnj_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnj_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -122,7 +122,7 @@ vfloat64m1_t test_vfsgnj_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnj_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, @@ -132,7 +132,7 @@ vfloat64m2_t test_vfsgnj_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnj_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -141,7 +141,7 @@ vfloat64m2_t test_vfsgnj_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnj_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, @@ -151,7 +151,7 @@ vfloat64m4_t test_vfsgnj_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnj_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -160,7 +160,7 @@ vfloat64m4_t test_vfsgnj_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnj_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, @@ -170,7 +170,7 @@ vfloat64m8_t test_vfsgnj_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnj_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { @@ -179,7 +179,7 @@ vfloat64m8_t test_vfsgnj_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjn_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -189,7 +189,7 @@ vfloat32mf2_t test_vfsgnjn_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjn_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ 
-198,7 +198,7 @@ vfloat32mf2_t test_vfsgnjn_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjn_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, @@ -208,7 +208,7 @@ vfloat32m1_t test_vfsgnjn_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjn_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -217,7 +217,7 @@ vfloat32m1_t test_vfsgnjn_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjn_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, @@ -227,7 +227,7 @@ vfloat32m2_t test_vfsgnjn_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjn_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -236,7 +236,7 @@ vfloat32m2_t test_vfsgnjn_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjn_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, @@ -246,7 +246,7 @@ vfloat32m4_t test_vfsgnjn_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjn_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -255,7 +255,7 @@ vfloat32m4_t test_vfsgnjn_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjn_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, @@ -265,7 +265,7 @@ vfloat32m8_t test_vfsgnjn_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjn_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -274,7 +274,7 @@ vfloat32m8_t test_vfsgnjn_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjn_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, @@ -284,7 +284,7 @@ vfloat64m1_t test_vfsgnjn_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjn_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -293,7 +293,7 @@ vfloat64m1_t test_vfsgnjn_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjn_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, @@ -303,7 +303,7 @@ vfloat64m2_t test_vfsgnjn_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjn_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -312,7 +312,7 @@ vfloat64m2_t test_vfsgnjn_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjn_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, @@ -322,7 +322,7 @@ vfloat64m4_t test_vfsgnjn_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m4( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjn_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -331,7 +331,7 @@ vfloat64m4_t test_vfsgnjn_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjn_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, @@ -341,7 +341,7 @@ vfloat64m8_t test_vfsgnjn_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjn_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { @@ -350,7 +350,7 @@ vfloat64m8_t test_vfsgnjn_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjx_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -360,7 +360,7 @@ vfloat32mf2_t test_vfsgnjx_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjx_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -369,7 +369,7 @@ vfloat32mf2_t test_vfsgnjx_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjx_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, @@ -379,7 +379,7 @@ vfloat32m1_t test_vfsgnjx_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t 
test_vfsgnjx_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -388,7 +388,7 @@ vfloat32m1_t test_vfsgnjx_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjx_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, @@ -398,7 +398,7 @@ vfloat32m2_t test_vfsgnjx_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjx_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -407,7 +407,7 @@ vfloat32m2_t test_vfsgnjx_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjx_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, @@ -417,7 +417,7 @@ vfloat32m4_t test_vfsgnjx_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjx_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -426,7 +426,7 @@ vfloat32m4_t test_vfsgnjx_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjx_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, @@ -436,7 +436,7 @@ vfloat32m8_t test_vfsgnjx_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjx_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -445,7 +445,7 @@ vfloat32m8_t test_vfsgnjx_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjx_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, @@ -455,7 +455,7 @@ vfloat64m1_t test_vfsgnjx_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjx_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -464,7 +464,7 @@ vfloat64m1_t test_vfsgnjx_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjx_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, @@ -474,7 +474,7 @@ vfloat64m2_t test_vfsgnjx_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjx_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -483,7 +483,7 @@ vfloat64m2_t test_vfsgnjx_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjx_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, @@ -493,7 +493,7 @@ vfloat64m4_t test_vfsgnjx_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjx_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -502,7 +502,7 @@ vfloat64m4_t test_vfsgnjx_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjx_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, @@ -512,7 +512,7 @@ vfloat64m8_t test_vfsgnjx_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, // 
CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjx_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { @@ -1090,7 +1090,7 @@ vfloat64m8_t test_vfsgnjx_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsgnj_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { @@ -1099,7 +1099,7 @@ vfloat16mf4_t test_vfsgnj_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_ // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsgnj_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -1108,7 +1108,7 @@ vfloat16mf4_t test_vfsgnj_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsgnj_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { @@ -1117,7 +1117,7 @@ vfloat16mf2_t test_vfsgnj_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_ // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsgnj_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -1126,7 +1126,7 @@ vfloat16mf2_t test_vfsgnj_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsgnj_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -1135,7 +1135,7 @@ vfloat16m1_t test_vfsgnj_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsgnj.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsgnj_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -1144,7 +1144,7 @@ vfloat16m1_t test_vfsgnj_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsgnj_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -1153,7 +1153,7 @@ vfloat16m2_t test_vfsgnj_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsgnj_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -1162,7 +1162,7 @@ vfloat16m2_t test_vfsgnj_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsgnj_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { @@ -1171,7 +1171,7 @@ vfloat16m4_t test_vfsgnj_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsgnj_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -1180,7 +1180,7 @@ vfloat16m4_t test_vfsgnj_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv32f16.nxv32f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsgnj_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { @@ -1189,7 +1189,7 @@ vfloat16m8_t test_vfsgnj_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv32f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsgnj_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { @@ -1198,7 +1198,7 @@ vfloat16m8_t test_vfsgnj_vf_f16m8 
(vfloat16m8_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsgnjn_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { @@ -1207,7 +1207,7 @@ vfloat16mf4_t test_vfsgnjn_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsgnjn_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -1216,7 +1216,7 @@ vfloat16mf4_t test_vfsgnjn_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsgnjn_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { @@ -1225,7 +1225,7 @@ vfloat16mf2_t test_vfsgnjn_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsgnjn_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -1234,7 +1234,7 @@ vfloat16mf2_t test_vfsgnjn_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsgnjn_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -1243,7 +1243,7 @@ vfloat16m1_t test_vfsgnjn_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t v // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsgnjn_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -1252,7 +1252,7 @@ vfloat16m1_t test_vfsgnjn_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsgnjn_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -1261,7 +1261,7 @@ vfloat16m2_t test_vfsgnjn_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t v // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsgnjn_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -1270,7 +1270,7 @@ vfloat16m2_t test_vfsgnjn_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsgnjn_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { @@ -1279,7 +1279,7 @@ vfloat16m4_t test_vfsgnjn_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t v // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsgnjn_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -1288,7 +1288,7 @@ vfloat16m4_t test_vfsgnjn_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv32f16.nxv32f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsgnjn_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { @@ -1297,7 +1297,7 @@ vfloat16m8_t test_vfsgnjn_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t v // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv32f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsgnjn_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { @@ -1306,7 +1306,7 @@ vfloat16m8_t test_vfsgnjn_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsgnjx_vv_f16mf4 (vfloat16mf4_t op1, 
vfloat16mf4_t op2, size_t vl) { @@ -1315,7 +1315,7 @@ vfloat16mf4_t test_vfsgnjx_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsgnjx_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -1324,7 +1324,7 @@ vfloat16mf4_t test_vfsgnjx_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsgnjx_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { @@ -1333,7 +1333,7 @@ vfloat16mf2_t test_vfsgnjx_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsgnjx_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -1342,7 +1342,7 @@ vfloat16mf2_t test_vfsgnjx_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsgnjx_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -1351,7 +1351,7 @@ vfloat16m1_t test_vfsgnjx_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t v // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsgnjx_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -1360,7 +1360,7 @@ vfloat16m1_t test_vfsgnjx_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsgnjx_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -1369,7 +1369,7 @@ vfloat16m2_t test_vfsgnjx_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t v // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vfsgnjx.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16m2_t test_vfsgnjx_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -1378,7 +1378,7 @@ vfloat16m2_t test_vfsgnjx_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16m4_t test_vfsgnjx_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
@@ -1387,7 +1387,7 @@ vfloat16m4_t test_vfsgnjx_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t v
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16m4_t test_vfsgnjx_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -1396,7 +1396,7 @@ vfloat16m4_t test_vfsgnjx_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv32f16.nxv32f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16m8_t test_vfsgnjx_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
@@ -1405,7 +1405,7 @@ vfloat16m8_t test_vfsgnjx_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t v
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv32f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16m8_t test_vfsgnjx_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1down.c
index 9c01c32..624e55f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1down.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1down.c
@@ -8,7 +8,7 @@
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv1f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv1f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32mf2_t test_vfslide1down_vf_f32mf2(vfloat32mf2_t src, float value,
@@ -18,7 +18,7 @@ vfloat32mf2_t test_vfslide1down_vf_f32mf2(vfloat32mf2_t src, float value,
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call
@llvm.riscv.vfslide1down.nxv2f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv2f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfslide1down_vf_f32m1(vfloat32m1_t src, float value, @@ -28,7 +28,7 @@ vfloat32m1_t test_vfslide1down_vf_f32m1(vfloat32m1_t src, float value, // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv4f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv4f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfslide1down_vf_f32m2(vfloat32m2_t src, float value, @@ -38,7 +38,7 @@ vfloat32m2_t test_vfslide1down_vf_f32m2(vfloat32m2_t src, float value, // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv8f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv8f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfslide1down_vf_f32m4(vfloat32m4_t src, float value, @@ -48,7 +48,7 @@ vfloat32m4_t test_vfslide1down_vf_f32m4(vfloat32m4_t src, float value, // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv16f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv16f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfslide1down_vf_f32m8(vfloat32m8_t src, float value, @@ -58,7 +58,7 @@ vfloat32m8_t test_vfslide1down_vf_f32m8(vfloat32m8_t src, float value, // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv1f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv1f64.f64.i64( undef, [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfslide1down_vf_f64m1(vfloat64m1_t src, double value, @@ -68,7 +68,7 @@ vfloat64m1_t test_vfslide1down_vf_f64m1(vfloat64m1_t src, double value, // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv2f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv2f64.f64.i64( undef, [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfslide1down_vf_f64m2(vfloat64m2_t src, double value, @@ -78,7 +78,7 @@ vfloat64m2_t test_vfslide1down_vf_f64m2(vfloat64m2_t src, double value, // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv4f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv4f64.f64.i64( undef, [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] 
// vfloat64m4_t test_vfslide1down_vf_f64m4(vfloat64m4_t src, double value, @@ -88,7 +88,7 @@ vfloat64m4_t test_vfslide1down_vf_f64m4(vfloat64m4_t src, double value, // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv8f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv8f64.f64.i64( undef, [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfslide1down_vf_f64m8(vfloat64m8_t src, double value, @@ -203,7 +203,7 @@ vfloat64m8_t test_vfslide1down_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv1f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv1f16.f16.i64( undef, [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfslide1down_vf_f16mf4 (vfloat16mf4_t src, _Float16 value, size_t vl) { @@ -212,7 +212,7 @@ vfloat16mf4_t test_vfslide1down_vf_f16mf4 (vfloat16mf4_t src, _Float16 value, si // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv2f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv2f16.f16.i64( undef, [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfslide1down_vf_f16mf2 (vfloat16mf2_t src, _Float16 value, size_t vl) { @@ -221,7 +221,7 @@ vfloat16mf2_t test_vfslide1down_vf_f16mf2 (vfloat16mf2_t src, _Float16 value, si // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv4f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv4f16.f16.i64( undef, [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfslide1down_vf_f16m1 (vfloat16m1_t src, _Float16 value, size_t vl) { @@ -230,7 +230,7 @@ vfloat16m1_t test_vfslide1down_vf_f16m1 (vfloat16m1_t src, _Float16 value, size_ // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv8f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv8f16.f16.i64( undef, [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfslide1down_vf_f16m2 (vfloat16m2_t src, _Float16 value, size_t vl) { @@ -239,7 +239,7 @@ vfloat16m2_t test_vfslide1down_vf_f16m2 (vfloat16m2_t src, _Float16 value, size_ // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv16f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv16f16.f16.i64( undef, [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfslide1down_vf_f16m4 (vfloat16m4_t src, _Float16 value, size_t vl) { @@ -248,7 +248,7 @@ vfloat16m4_t 
test_vfslide1down_vf_f16m4 (vfloat16m4_t src, _Float16 value, size_
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv32f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv32f16.f16.i64( undef, [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16m8_t test_vfslide1down_vf_f16m8 (vfloat16m8_t src, _Float16 value, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1up.c
index 389c762..493473e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1up.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1up.c
@@ -8,7 +8,7 @@
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv1f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv1f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32mf2_t test_vfslide1up_vf_f32mf2(vfloat32mf2_t src, float value,
@@ -18,7 +18,7 @@ vfloat32mf2_t test_vfslide1up_vf_f32mf2(vfloat32mf2_t src, float value,
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv2f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv2f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m1_t test_vfslide1up_vf_f32m1(vfloat32m1_t src, float value,
@@ -28,7 +28,7 @@ vfloat32m1_t test_vfslide1up_vf_f32m1(vfloat32m1_t src, float value,
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv4f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv4f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m2_t test_vfslide1up_vf_f32m2(vfloat32m2_t src, float value,
@@ -38,7 +38,7 @@ vfloat32m2_t test_vfslide1up_vf_f32m2(vfloat32m2_t src, float value,
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv8f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv8f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m4_t test_vfslide1up_vf_f32m4(vfloat32m4_t src, float value,
@@ -48,7 +48,7 @@ vfloat32m4_t test_vfslide1up_vf_f32m4(vfloat32m4_t src, float value,
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv16f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv16f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m8_t test_vfslide1up_vf_f32m8(vfloat32m8_t src, float value,
@@ -58,7 +58,7 @@ vfloat32m8_t test_vfslide1up_vf_f32m8(vfloat32m8_t src, float value,
// CHECK-RV64-LABEL:
@test_vfslide1up_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv1f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv1f64.f64.i64( undef, [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfslide1up_vf_f64m1(vfloat64m1_t src, double value, @@ -68,7 +68,7 @@ vfloat64m1_t test_vfslide1up_vf_f64m1(vfloat64m1_t src, double value, // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv2f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv2f64.f64.i64( undef, [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfslide1up_vf_f64m2(vfloat64m2_t src, double value, @@ -78,7 +78,7 @@ vfloat64m2_t test_vfslide1up_vf_f64m2(vfloat64m2_t src, double value, // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv4f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv4f64.f64.i64( undef, [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfslide1up_vf_f64m4(vfloat64m4_t src, double value, @@ -88,7 +88,7 @@ vfloat64m4_t test_vfslide1up_vf_f64m4(vfloat64m4_t src, double value, // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv8f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv8f64.f64.i64( undef, [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfslide1up_vf_f64m8(vfloat64m8_t src, double value, @@ -198,7 +198,7 @@ vfloat64m8_t test_vfslide1up_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv1f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv1f16.f16.i64( undef, [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfslide1up_vf_f16mf4 (vfloat16mf4_t src, _Float16 value, size_t vl) { @@ -207,7 +207,7 @@ vfloat16mf4_t test_vfslide1up_vf_f16mf4 (vfloat16mf4_t src, _Float16 value, size // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv2f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv2f16.f16.i64( undef, [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfslide1up_vf_f16mf2 (vfloat16mf2_t src, _Float16 value, size_t vl) { @@ -216,7 +216,7 @@ vfloat16mf2_t test_vfslide1up_vf_f16mf2 (vfloat16mf2_t src, _Float16 value, size // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv4f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfslide1up.nxv4f16.f16.i64( undef, [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16m1_t test_vfslide1up_vf_f16m1 (vfloat16m1_t src, _Float16 value, size_t vl) {
@@ -225,7 +225,7 @@ vfloat16m1_t test_vfslide1up_vf_f16m1 (vfloat16m1_t src, _Float16 value, size_t
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv8f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv8f16.f16.i64( undef, [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16m2_t test_vfslide1up_vf_f16m2 (vfloat16m2_t src, _Float16 value, size_t vl) {
@@ -234,7 +234,7 @@ vfloat16m2_t test_vfslide1up_vf_f16m2 (vfloat16m2_t src, _Float16 value, size_t
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv16f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv16f16.f16.i64( undef, [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16m4_t test_vfslide1up_vf_f16m4 (vfloat16m4_t src, _Float16 value, size_t
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv32f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv32f16.f16.i64( undef, [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16m8_t test_vfslide1up_vf_f16m8 (vfloat16m8_t src, _Float16 value, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsub.c
index 88a4c9a..bbb00da 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsub.c
@@ -8,7 +8,7 @@
// CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32mf2_t test_vfsub_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -18,7 +18,7 @@ vfloat32mf2_t test_vfsub_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
// CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32mf2_t test_vfsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -27,7 +27,7 @@ vfloat32mf2_t test_vfsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsub_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, @@ -37,7 +37,7 @@ vfloat32m1_t test_vfsub_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, // CHECK-RV64-LABEL: @test_vfsub_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -46,7 +46,7 @@ vfloat32m1_t test_vfsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsub_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsub_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, @@ -56,7 +56,7 @@ vfloat32m2_t test_vfsub_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, // CHECK-RV64-LABEL: @test_vfsub_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -65,7 +65,7 @@ vfloat32m2_t test_vfsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsub_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsub_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, @@ -75,7 +75,7 @@ vfloat32m4_t test_vfsub_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, // CHECK-RV64-LABEL: @test_vfsub_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -84,7 +84,7 @@ vfloat32m4_t test_vfsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsub_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsub_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, @@ -94,7 +94,7 @@ vfloat32m8_t test_vfsub_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, // CHECK-RV64-LABEL: @test_vfsub_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -103,7 +103,7 @@ vfloat32m8_t test_vfsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsub_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsub_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, @@ -113,7 +113,7 @@ vfloat64m1_t test_vfsub_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, // CHECK-RV64-LABEL: @test_vfsub_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -122,7 +122,7 @@ vfloat64m1_t test_vfsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsub_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsub_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, @@ -132,7 +132,7 @@ vfloat64m2_t test_vfsub_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, // CHECK-RV64-LABEL: @test_vfsub_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -141,7 +141,7 @@ vfloat64m2_t test_vfsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsub_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsub_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, @@ -151,7 +151,7 @@ vfloat64m4_t test_vfsub_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, // CHECK-RV64-LABEL: @test_vfsub_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -160,7 +160,7 @@ vfloat64m4_t test_vfsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsub_vv_f64m8( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsub_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, @@ -170,7 +170,7 @@ vfloat64m8_t test_vfsub_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, // CHECK-RV64-LABEL: @test_vfsub_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsub_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { @@ -368,7 +368,7 @@ vfloat64m8_t test_vfsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, // CHECK-RV64-LABEL: @test_vfsub_vv_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsub_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { @@ -377,7 +377,7 @@ vfloat16mf4_t test_vfsub_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t // CHECK-RV64-LABEL: @test_vfsub_vf_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsub_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -386,7 +386,7 @@ vfloat16mf4_t test_vfsub_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) // CHECK-RV64-LABEL: @test_vfsub_vv_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsub_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { @@ -395,7 +395,7 @@ vfloat16mf2_t test_vfsub_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t // CHECK-RV64-LABEL: @test_vfsub_vf_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsub_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -404,7 +404,7 @@ vfloat16mf2_t test_vfsub_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) // CHECK-RV64-LABEL: @test_vfsub_vv_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsub_vv_f16m1 (vfloat16m1_t 
op1, vfloat16m1_t op2, size_t vl) { @@ -413,7 +413,7 @@ vfloat16m1_t test_vfsub_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfsub_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsub_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -422,7 +422,7 @@ vfloat16m1_t test_vfsub_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsub_vv_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsub_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -431,7 +431,7 @@ vfloat16m2_t test_vfsub_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfsub_vf_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsub_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -440,7 +440,7 @@ vfloat16m2_t test_vfsub_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsub_vv_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsub_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { @@ -449,7 +449,7 @@ vfloat16m4_t test_vfsub_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfsub_vf_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsub_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -458,7 +458,7 @@ vfloat16m4_t test_vfsub_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfsub_vv_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv32f16.nxv32f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsub_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { @@ -467,7 +467,7 @@ vfloat16m8_t test_vfsub_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vfsub_vf_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv32f16.f16.i64( [[OP1:%.*]], half 
[[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv32f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsub_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwadd.c index 87f59f8..818c162 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwadd.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -18,7 +18,7 @@ vfloat64m1_t test_vfwadd_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) { @@ -27,7 +27,7 @@ vfloat64m1_t test_vfwadd_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2, @@ -37,7 +37,7 @@ vfloat64m1_t test_vfwadd_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2, // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) { @@ -46,7 +46,7 @@ vfloat64m1_t test_vfwadd_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, @@ -56,7 +56,7 @@ vfloat64m2_t test_vfwadd_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32.i64( undef, [[OP1:%.*]], float 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) { @@ -65,7 +65,7 @@ vfloat64m2_t test_vfwadd_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2, @@ -75,7 +75,7 @@ vfloat64m2_t test_vfwadd_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2, // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) { @@ -84,7 +84,7 @@ vfloat64m2_t test_vfwadd_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, @@ -94,7 +94,7 @@ vfloat64m4_t test_vfwadd_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) { @@ -103,7 +103,7 @@ vfloat64m4_t test_vfwadd_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2, @@ -113,7 +113,7 @@ vfloat64m4_t test_vfwadd_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2, // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) { @@ -122,7 +122,7 @@ vfloat64m4_t test_vfwadd_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, @@ -132,7 +132,7 @@ vfloat64m8_t test_vfwadd_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) { @@ -141,7 +141,7 @@ vfloat64m8_t test_vfwadd_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2, @@ -151,7 +151,7 @@ vfloat64m8_t test_vfwadd_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2, // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_wf_f64m8(vfloat64m8_t op1, float op2, size_t vl) { @@ -328,7 +328,7 @@ vfloat64m8_t test_vfwadd_wf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, // CHECK-RV64-LABEL: @test_vfwadd_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwadd_vv_f32mf2 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { @@ -337,7 +337,7 @@ vfloat32mf2_t test_vfwadd_vv_f32mf2 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_ // CHECK-RV64-LABEL: @test_vfwadd_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f32.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f32.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwadd_vf_f32mf2 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -346,7 +346,7 @@ vfloat32mf2_t test_vfwadd_vf_f32mf2 (vfloat16mf4_t op1, _Float16 op2, size_t vl) // CHECK-RV64-LABEL: @test_vfwadd_wv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t 
test_vfwadd_wv_f32mf2 (vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { @@ -355,7 +355,7 @@ vfloat32mf2_t test_vfwadd_wv_f32mf2 (vfloat32mf2_t op1, vfloat16mf4_t op2, size_ // CHECK-RV64-LABEL: @test_vfwadd_wf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f32.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwadd_wf_f32mf2 (vfloat32mf2_t op1, _Float16 op2, size_t vl) { @@ -364,7 +364,7 @@ vfloat32mf2_t test_vfwadd_wf_f32mf2 (vfloat32mf2_t op1, _Float16 op2, size_t vl) // CHECK-RV64-LABEL: @test_vfwadd_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f32.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f32.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwadd_vv_f32m1 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { @@ -373,7 +373,7 @@ vfloat32m1_t test_vfwadd_vv_f32m1 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t // CHECK-RV64-LABEL: @test_vfwadd_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f32.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f32.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwadd_vf_f32m1 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -382,7 +382,7 @@ vfloat32m1_t test_vfwadd_vf_f32m1 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwadd_wv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwadd_wv_f32m1 (vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { @@ -391,7 +391,7 @@ vfloat32m1_t test_vfwadd_wv_f32m1 (vfloat32m1_t op1, vfloat16mf2_t op2, size_t v // CHECK-RV64-LABEL: @test_vfwadd_wf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f32.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwadd_wf_f32m1 (vfloat32m1_t op1, _Float16 op2, size_t vl) { @@ -400,7 +400,7 @@ vfloat32m1_t test_vfwadd_wf_f32m1 (vfloat32m1_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwadd_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f32.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f32.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwadd_vv_f32m2 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -409,7 +409,7 @@ vfloat32m2_t test_vfwadd_vv_f32m2 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl // CHECK-RV64-LABEL: @test_vfwadd_vf_f32m2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f32.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f32.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwadd_vf_f32m2 (vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -418,7 +418,7 @@ vfloat32m2_t test_vfwadd_vf_f32m2 (vfloat16m1_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwadd_wv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwadd_wv_f32m2 (vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { @@ -427,7 +427,7 @@ vfloat32m2_t test_vfwadd_wv_f32m2 (vfloat32m2_t op1, vfloat16m1_t op2, size_t vl // CHECK-RV64-LABEL: @test_vfwadd_wf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f32.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwadd_wf_f32m2 (vfloat32m2_t op1, _Float16 op2, size_t vl) { @@ -436,7 +436,7 @@ vfloat32m2_t test_vfwadd_wf_f32m2 (vfloat32m2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwadd_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f32.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f32.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwadd_vv_f32m4 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -445,7 +445,7 @@ vfloat32m4_t test_vfwadd_vv_f32m4 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl // CHECK-RV64-LABEL: @test_vfwadd_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f32.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f32.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwadd_vf_f32m4 (vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -454,7 +454,7 @@ vfloat32m4_t test_vfwadd_vf_f32m4 (vfloat16m2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwadd_wv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwadd_wv_f32m4 (vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { @@ -463,7 +463,7 @@ vfloat32m4_t test_vfwadd_wv_f32m4 (vfloat32m4_t op1, vfloat16m2_t op2, size_t vl // CHECK-RV64-LABEL: @test_vfwadd_wf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f32.f16.i64( 
undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwadd_wf_f32m4 (vfloat32m4_t op1, _Float16 op2, size_t vl) { @@ -472,7 +472,7 @@ vfloat32m4_t test_vfwadd_wf_f32m4 (vfloat32m4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwadd_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv16f32.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv16f32.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwadd_vv_f32m8 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { @@ -481,7 +481,7 @@ vfloat32m8_t test_vfwadd_vv_f32m8 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl // CHECK-RV64-LABEL: @test_vfwadd_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv16f32.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv16f32.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwadd_vf_f32m8 (vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -490,7 +490,7 @@ vfloat32m8_t test_vfwadd_vf_f32m8 (vfloat16m4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwadd_wv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv16f32.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv16f32.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwadd_wv_f32m8 (vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { @@ -499,7 +499,7 @@ vfloat32m8_t test_vfwadd_wv_f32m8 (vfloat32m8_t op1, vfloat16m4_t op2, size_t vl // CHECK-RV64-LABEL: @test_vfwadd_wf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv16f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv16f32.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwadd_wf_f32m8 (vfloat32m8_t op1, _Float16 op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmul.c index 0254680..49ca097 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmul.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmul_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -18,7 +18,7 @@ vfloat64m1_t test_vfwmul_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmul_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) { @@ -27,7 +27,7 @@ vfloat64m1_t test_vfwmul_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmul_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, @@ -37,7 +37,7 @@ vfloat64m2_t test_vfwmul_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmul_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) { @@ -46,7 +46,7 @@ vfloat64m2_t test_vfwmul_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmul_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, @@ -56,7 +56,7 @@ vfloat64m4_t test_vfwmul_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmul_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) { @@ -65,7 +65,7 @@ vfloat64m4_t test_vfwmul_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmul_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, @@ -75,7 +75,7 @@ vfloat64m8_t test_vfwmul_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmul_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) { @@ -168,7 +168,7 @@ vfloat64m8_t test_vfwmul_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, // CHECK-RV64-LABEL: @test_vfwmul_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmul_vv_f32mf2 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { @@ -177,7 +177,7 @@ vfloat32mf2_t test_vfwmul_vv_f32mf2 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_ // CHECK-RV64-LABEL: @test_vfwmul_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f32.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f32.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmul_vf_f32mf2 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -186,7 +186,7 @@ vfloat32mf2_t test_vfwmul_vf_f32mf2 (vfloat16mf4_t op1, _Float16 op2, size_t vl) // CHECK-RV64-LABEL: @test_vfwmul_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f32.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f32.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmul_vv_f32m1 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { @@ -195,7 +195,7 @@ vfloat32m1_t test_vfwmul_vv_f32m1 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t // CHECK-RV64-LABEL: @test_vfwmul_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f32.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f32.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmul_vf_f32m1 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -204,7 +204,7 @@ vfloat32m1_t test_vfwmul_vf_f32m1 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwmul_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f32.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f32.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmul_vv_f32m2 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -213,7 +213,7 @@ vfloat32m2_t test_vfwmul_vv_f32m2 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl // CHECK-RV64-LABEL: @test_vfwmul_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f32.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f32.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmul_vf_f32m2 (vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -222,7 +222,7 @@ vfloat32m2_t test_vfwmul_vf_f32m2 (vfloat16m1_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwmul_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f32.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmul.nxv8f32.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmul_vv_f32m4 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -231,7 +231,7 @@ vfloat32m4_t test_vfwmul_vv_f32m4 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl // CHECK-RV64-LABEL: @test_vfwmul_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f32.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f32.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmul_vf_f32m4 (vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -240,7 +240,7 @@ vfloat32m4_t test_vfwmul_vf_f32m4 (vfloat16m2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwmul_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv16f32.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv16f32.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmul_vv_f32m8 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { @@ -249,7 +249,7 @@ vfloat32m8_t test_vfwmul_vv_f32m8 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl // CHECK-RV64-LABEL: @test_vfwmul_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv16f32.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv16f32.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmul_vf_f32m8 (vfloat16m4_t op1, _Float16 op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwsub.c index 410b3c8..e295290 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwsub.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwsub_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -18,7 +18,7 @@ vfloat64m1_t test_vfwsub_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwsub_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) { @@ -27,7 +27,7 @@ vfloat64m1_t test_vfwsub_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32.i64( undef, 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwsub_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2, @@ -37,7 +37,7 @@ vfloat64m1_t test_vfwsub_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2, // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwsub_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) { @@ -46,7 +46,7 @@ vfloat64m1_t test_vfwsub_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, @@ -56,7 +56,7 @@ vfloat64m2_t test_vfwsub_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) { @@ -65,7 +65,7 @@ vfloat64m2_t test_vfwsub_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2, @@ -75,7 +75,7 @@ vfloat64m2_t test_vfwsub_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2, // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) { @@ -84,7 +84,7 @@ vfloat64m2_t test_vfwsub_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, @@ -94,7 +94,7 @@ vfloat64m4_t test_vfwsub_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) { @@ -103,7 +103,7 @@ vfloat64m4_t test_vfwsub_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2, @@ -113,7 +113,7 @@ vfloat64m4_t test_vfwsub_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2, // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) { @@ -122,7 +122,7 @@ vfloat64m4_t test_vfwsub_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwsub_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, @@ -132,7 +132,7 @@ vfloat64m8_t test_vfwsub_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwsub_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) { @@ -141,7 +141,7 @@ vfloat64m8_t test_vfwsub_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwsub_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2, @@ -151,7 +151,7 @@ vfloat64m8_t test_vfwsub_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2, // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwsub_wf_f64m8(vfloat64m8_t 
op1, float op2, size_t vl) { @@ -328,7 +328,7 @@ vfloat64m8_t test_vfwsub_wf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, // CHECK-RV64-LABEL: @test_vfwsub_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwsub_vv_f32mf2 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { @@ -337,7 +337,7 @@ vfloat32mf2_t test_vfwsub_vv_f32mf2 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_ // CHECK-RV64-LABEL: @test_vfwsub_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f32.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f32.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwsub_vf_f32mf2 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -346,7 +346,7 @@ vfloat32mf2_t test_vfwsub_vf_f32mf2 (vfloat16mf4_t op1, _Float16 op2, size_t vl) // CHECK-RV64-LABEL: @test_vfwsub_wv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwsub_wv_f32mf2 (vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { @@ -355,7 +355,7 @@ vfloat32mf2_t test_vfwsub_wv_f32mf2 (vfloat32mf2_t op1, vfloat16mf4_t op2, size_ // CHECK-RV64-LABEL: @test_vfwsub_wf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f32.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwsub_wf_f32mf2 (vfloat32mf2_t op1, _Float16 op2, size_t vl) { @@ -364,7 +364,7 @@ vfloat32mf2_t test_vfwsub_wf_f32mf2 (vfloat32mf2_t op1, _Float16 op2, size_t vl) // CHECK-RV64-LABEL: @test_vfwsub_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f32.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f32.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwsub_vv_f32m1 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { @@ -373,7 +373,7 @@ vfloat32m1_t test_vfwsub_vv_f32m1 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t // CHECK-RV64-LABEL: @test_vfwsub_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f32.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f32.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwsub_vf_f32m1 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -382,7 +382,7 @@ vfloat32m1_t test_vfwsub_vf_f32m1 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwsub_wv_f32m1( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f32.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f32.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwsub_wv_f32m1 (vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { @@ -391,7 +391,7 @@ vfloat32m1_t test_vfwsub_wv_f32m1 (vfloat32m1_t op1, vfloat16mf2_t op2, size_t v // CHECK-RV64-LABEL: @test_vfwsub_wf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f32.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwsub_wf_f32m1 (vfloat32m1_t op1, _Float16 op2, size_t vl) { @@ -400,7 +400,7 @@ vfloat32m1_t test_vfwsub_wf_f32m1 (vfloat32m1_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwsub_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f32.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f32.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwsub_vv_f32m2 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -409,7 +409,7 @@ vfloat32m2_t test_vfwsub_vv_f32m2 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl // CHECK-RV64-LABEL: @test_vfwsub_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f32.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f32.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwsub_vf_f32m2 (vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -418,7 +418,7 @@ vfloat32m2_t test_vfwsub_vf_f32m2 (vfloat16m1_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwsub_wv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f32.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f32.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwsub_wv_f32m2 (vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { @@ -427,7 +427,7 @@ vfloat32m2_t test_vfwsub_wv_f32m2 (vfloat32m2_t op1, vfloat16m1_t op2, size_t vl // CHECK-RV64-LABEL: @test_vfwsub_wf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f32.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwsub_wf_f32m2 (vfloat32m2_t op1, _Float16 op2, size_t vl) { @@ -436,7 +436,7 @@ vfloat32m2_t test_vfwsub_wf_f32m2 (vfloat32m2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwsub_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f32.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f32.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwsub_vv_f32m4 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -445,7 +445,7 @@ vfloat32m4_t test_vfwsub_vv_f32m4 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl // CHECK-RV64-LABEL: @test_vfwsub_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f32.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f32.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwsub_vf_f32m4 (vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -454,7 +454,7 @@ vfloat32m4_t test_vfwsub_vf_f32m4 (vfloat16m2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwsub_wv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f32.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f32.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwsub_wv_f32m4 (vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { @@ -463,7 +463,7 @@ vfloat32m4_t test_vfwsub_wv_f32m4 (vfloat32m4_t op1, vfloat16m2_t op2, size_t vl // CHECK-RV64-LABEL: @test_vfwsub_wf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f32.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwsub_wf_f32m4 (vfloat32m4_t op1, _Float16 op2, size_t vl) { @@ -472,7 +472,7 @@ vfloat32m4_t test_vfwsub_wf_f32m4 (vfloat32m4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwsub_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv16f32.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv16f32.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwsub_vv_f32m8 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { @@ -481,7 +481,7 @@ vfloat32m8_t test_vfwsub_vv_f32m8 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl // CHECK-RV64-LABEL: @test_vfwsub_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv16f32.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv16f32.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwsub_vf_f32m8 (vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -490,7 +490,7 @@ vfloat32m8_t test_vfwsub_vf_f32m8 (vfloat16m4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfwsub_wv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwsub_wv_f32m8 (vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { @@ -499,7 +499,7 @@ vfloat32m8_t test_vfwsub_wv_f32m8 (vfloat32m8_t 
op1, vfloat16m4_t op2, size_t vl // CHECK-RV64-LABEL: @test_vfwsub_wf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv16f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv16f32.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwsub_wf_f32m8 (vfloat32m8_t op1, _Float16 op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmax.c index 17a4c55..6c24e04 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmax.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmax.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vmax_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmax_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vmax_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmax_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ vint8mf8_t test_vmax_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmax_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ vint8mf4_t test_vmax_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmax_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ vint8mf4_t test_vmax_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmax_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ vint8mf2_t test_vmax_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmax.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmax_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vmax_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmax_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vmax_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmax_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vmax_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmax_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vmax_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmax_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vmax_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmax_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vmax_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmax_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vmax_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vmax.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmax_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint8m8_t test_vmax_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmax_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint8m8_t test_vmax_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmax_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint16mf4_t test_vmax_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmax_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4_t test_vmax_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmax_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint16mf2_t test_vmax_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmax_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint16mf2_t test_vmax_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmax_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint16m1_t test_vmax_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmax.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmax_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint16m1_t test_vmax_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmax_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint16m2_t test_vmax_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmax_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint16m2_t test_vmax_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmax_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint16m4_t test_vmax_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmax_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint16m4_t test_vmax_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmax_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint16m8_t test_vmax_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmax_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint16m8_t test_vmax_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // 
CHECK-RV64-LABEL: @test_vmax_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmax_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vmax_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmax_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vmax_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmax_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vmax_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmax_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vmax_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmax_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ vint32m2_t test_vmax_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmax_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ vint32m2_t test_vmax_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmax_vv_i32m4(vint32m4_t op1, vint32m4_t op2, 
size_t vl) { @@ -303,7 +303,7 @@ vint32m4_t test_vmax_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmax_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ vint32m4_t test_vmax_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmax_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ vint32m8_t test_vmax_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmax_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ vint32m8_t test_vmax_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmax_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ vint64m1_t test_vmax_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmax_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ vint64m1_t test_vmax_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmax_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ vint64m2_t test_vmax_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmax_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ vint64m2_t test_vmax_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmax_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ vint64m4_t test_vmax_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmax_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ vint64m4_t test_vmax_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmax_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ vint64m8_t test_vmax_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmax_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmax_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ vint64m8_t test_vmax_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmaxu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ vuint8mf8_t test_vmaxu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmaxu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ vuint8mf8_t test_vmaxu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmaxu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ vuint8mf4_t test_vmaxu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmaxu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ vuint8mf4_t test_vmaxu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmaxu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ vuint8mf2_t test_vmaxu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmaxu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ vuint8mf2_t test_vmaxu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmaxu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ vuint8m1_t test_vmaxu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmaxu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ vuint8m1_t test_vmaxu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmaxu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ vuint8m2_t test_vmaxu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmaxu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ vuint8m2_t test_vmaxu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmaxu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ vuint8m4_t test_vmaxu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmaxu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ vuint8m4_t test_vmaxu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmaxu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ vuint8m8_t test_vmaxu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmaxu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ vuint8m8_t test_vmaxu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmaxu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -537,7 +537,7 @@ vuint16mf4_t test_vmaxu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmaxu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -546,7 +546,7 @@ vuint16mf4_t 
test_vmaxu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmaxu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -555,7 +555,7 @@ vuint16mf2_t test_vmaxu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmaxu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -564,7 +564,7 @@ vuint16mf2_t test_vmaxu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmaxu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -573,7 +573,7 @@ vuint16m1_t test_vmaxu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmaxu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -582,7 +582,7 @@ vuint16m1_t test_vmaxu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmaxu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -591,7 +591,7 @@ vuint16m2_t test_vmaxu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmaxu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -600,7 +600,7 @@ vuint16m2_t test_vmaxu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i16.nxv16i16.i64( undef, 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmaxu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -609,7 +609,7 @@ vuint16m4_t test_vmaxu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmaxu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -618,7 +618,7 @@ vuint16m4_t test_vmaxu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmaxu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -627,7 +627,7 @@ vuint16m8_t test_vmaxu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmaxu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -636,7 +636,7 @@ vuint16m8_t test_vmaxu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmaxu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -645,7 +645,7 @@ vuint32mf2_t test_vmaxu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmaxu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -654,7 +654,7 @@ vuint32mf2_t test_vmaxu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmaxu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -663,7 +663,7 @@ vuint32m1_t test_vmaxu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmaxu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -672,7 +672,7 @@ vuint32m1_t test_vmaxu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmaxu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -681,7 +681,7 @@ vuint32m2_t test_vmaxu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmaxu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2_t test_vmaxu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmaxu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -699,7 +699,7 @@ vuint32m4_t test_vmaxu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmaxu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -708,7 +708,7 @@ vuint32m4_t test_vmaxu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmaxu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -717,7 +717,7 @@ vuint32m8_t test_vmaxu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmaxu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) 
{ @@ -726,7 +726,7 @@ vuint32m8_t test_vmaxu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmaxu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -735,7 +735,7 @@ vuint64m1_t test_vmaxu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmaxu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -744,7 +744,7 @@ vuint64m1_t test_vmaxu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmaxu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -753,7 +753,7 @@ vuint64m2_t test_vmaxu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmaxu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -762,7 +762,7 @@ vuint64m2_t test_vmaxu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmaxu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -771,7 +771,7 @@ vuint64m4_t test_vmaxu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmaxu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -780,7 +780,7 @@ vuint64m4_t test_vmaxu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmaxu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmaxu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -789,7 +789,7 @@ vuint64m8_t test_vmaxu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmaxu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmin.c index bb31db3..db1a83f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmin.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmin.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vmin_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmin_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vmin_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmin_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ vint8mf8_t test_vmin_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmin_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ vint8mf4_t test_vmin_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmin_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ vint8mf4_t test_vmin_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmin_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ vint8mf2_t test_vmin_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, 
size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmin_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vmin_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmin_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vmin_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmin_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vmin_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmin_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vmin_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmin_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vmin_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmin_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vmin_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmin_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vmin_vx_i8m4(vint8m4_t op1, int8_t op2, 
size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmin_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint8m8_t test_vmin_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmin_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint8m8_t test_vmin_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmin_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint16mf4_t test_vmin_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmin_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4_t test_vmin_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmin_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint16mf2_t test_vmin_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmin_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint16mf2_t test_vmin_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmin_vv_i16m1(vint16m1_t op1, 
vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint16m1_t test_vmin_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmin_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint16m1_t test_vmin_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmin_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint16m2_t test_vmin_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmin_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint16m2_t test_vmin_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmin_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint16m4_t test_vmin_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmin_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint16m4_t test_vmin_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmin_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint16m8_t test_vmin_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmin_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint16m8_t test_vmin_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmin_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vmin_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmin_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vmin_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmin_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vmin_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmin_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vmin_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmin_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ vint32m2_t test_vmin_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmin_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ vint32m2_t test_vmin_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmin_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ vint32m4_t test_vmin_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmin_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ vint32m4_t test_vmin_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmin_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ vint32m8_t test_vmin_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmin_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ vint32m8_t test_vmin_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmin_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ vint64m1_t test_vmin_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmin_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ vint64m1_t test_vmin_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmin_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ vint64m2_t test_vmin_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i64m2( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmin_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ vint64m2_t test_vmin_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmin_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ vint64m4_t test_vmin_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmin_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ vint64m4_t test_vmin_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmin_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ vint64m8_t test_vmin_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmin_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmin_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ vint64m8_t test_vmin_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vminu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ vuint8mf8_t test_vminu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vminu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ vuint8mf8_t test_vminu_vx_u8mf8(vuint8mf8_t op1, 
uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vminu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ vuint8mf4_t test_vminu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vminu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ vuint8mf4_t test_vminu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vminu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ vuint8mf2_t test_vminu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vminu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ vuint8mf2_t test_vminu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vminu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ vuint8m1_t test_vminu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vminu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ vuint8m1_t test_vminu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t 
test_vminu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ vuint8m2_t test_vminu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vminu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ vuint8m2_t test_vminu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vminu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ vuint8m4_t test_vminu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vminu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ vuint8m4_t test_vminu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vminu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ vuint8m8_t test_vminu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vminu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ vuint8m8_t test_vminu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vminu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -537,7 +537,7 @@ vuint16mf4_t test_vminu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vminu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vminu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vminu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -546,7 +546,7 @@ vuint16mf4_t test_vminu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vminu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -555,7 +555,7 @@ vuint16mf2_t test_vminu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vminu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vminu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -564,7 +564,7 @@ vuint16mf2_t test_vminu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vminu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -573,7 +573,7 @@ vuint16m1_t test_vminu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vminu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -582,7 +582,7 @@ vuint16m1_t test_vminu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vminu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -591,7 +591,7 @@ vuint16m2_t test_vminu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vminu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -600,7 +600,7 @@ vuint16m2_t test_vminu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u16m4( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vminu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -609,7 +609,7 @@ vuint16m4_t test_vminu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vminu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -618,7 +618,7 @@ vuint16m4_t test_vminu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vminu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -627,7 +627,7 @@ vuint16m8_t test_vminu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vminu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -636,7 +636,7 @@ vuint16m8_t test_vminu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vminu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -645,7 +645,7 @@ vuint32mf2_t test_vminu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vminu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vminu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -654,7 +654,7 @@ vuint32mf2_t test_vminu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t 
test_vminu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -663,7 +663,7 @@ vuint32m1_t test_vminu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vminu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -672,7 +672,7 @@ vuint32m1_t test_vminu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vminu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -681,7 +681,7 @@ vuint32m2_t test_vminu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vminu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2_t test_vminu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vminu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -699,7 +699,7 @@ vuint32m4_t test_vminu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vminu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -708,7 +708,7 @@ vuint32m4_t test_vminu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vminu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -717,7 +717,7 @@ vuint32m8_t test_vminu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vminu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -726,7 +726,7 @@ vuint32m8_t test_vminu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vminu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -735,7 +735,7 @@ vuint64m1_t test_vminu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vminu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -744,7 +744,7 @@ vuint64m1_t test_vminu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vminu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -753,7 +753,7 @@ vuint64m2_t test_vminu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vminu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -762,7 +762,7 @@ vuint64m2_t test_vminu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vminu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -771,7 +771,7 @@ vuint64m4_t test_vminu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vminu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -780,7 +780,7 @@ vuint64m4_t test_vminu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: 
@test_vminu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vminu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -789,7 +789,7 @@ vuint64m8_t test_vminu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vminu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vminu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul-eew64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul-eew64.c index 9768676..48f1386 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul-eew64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul-eew64.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulh_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -18,7 +18,7 @@ vint64m1_t test_vmulh_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulh_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -27,7 +27,7 @@ vint64m1_t test_vmulh_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulh_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -36,7 +36,7 @@ vint64m2_t test_vmulh_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulh_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -45,7 +45,7 @@ vint64m2_t test_vmulh_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vmulh.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulh_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -54,7 +54,7 @@ vint64m4_t test_vmulh_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulh_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -63,7 +63,7 @@ vint64m4_t test_vmulh_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulh_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -72,7 +72,7 @@ vint64m8_t test_vmulh_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulh_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -81,7 +81,7 @@ vint64m8_t test_vmulh_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmulhu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -90,7 +90,7 @@ vuint64m1_t test_vmulhu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmulhu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -99,7 +99,7 @@ vuint64m1_t test_vmulhu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmulhu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -108,7 +108,7 @@ vuint64m2_t test_vmulhu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmulhu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -117,7 +117,7 @@ vuint64m2_t test_vmulhu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmulhu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -126,7 +126,7 @@ vuint64m4_t test_vmulhu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmulhu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -135,7 +135,7 @@ vuint64m4_t test_vmulhu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmulhu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -144,7 +144,7 @@ vuint64m8_t test_vmulhu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmulhu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { @@ -153,7 +153,7 @@ vuint64m8_t test_vmulhu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulhsu_vv_i64m1(vint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -162,7 +162,7 @@ vint64m1_t test_vmulhsu_vv_i64m1(vint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulhsu_vx_i64m1(vint64m1_t 
op1, uint64_t op2, size_t vl) { @@ -171,7 +171,7 @@ vint64m1_t test_vmulhsu_vx_i64m1(vint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulhsu_vv_i64m2(vint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -180,7 +180,7 @@ vint64m2_t test_vmulhsu_vv_i64m2(vint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulhsu_vx_i64m2(vint64m2_t op1, uint64_t op2, size_t vl) { @@ -189,7 +189,7 @@ vint64m2_t test_vmulhsu_vx_i64m2(vint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulhsu_vv_i64m4(vint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -198,7 +198,7 @@ vint64m4_t test_vmulhsu_vv_i64m4(vint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulhsu_vx_i64m4(vint64m4_t op1, uint64_t op2, size_t vl) { @@ -207,7 +207,7 @@ vint64m4_t test_vmulhsu_vx_i64m4(vint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulhsu_vv_i64m8(vint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -216,7 +216,7 @@ vint64m8_t test_vmulhsu_vv_i64m8(vint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulhsu_vx_i64m8(vint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul.c index c2c4522..2e0c093 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: 
@test_vmul_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ vint8mf8_t test_vmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ vint8mf4_t test_vmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ vint8mf4_t test_vmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ vint8mf2_t test_vmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, 
size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint8m8_t test_vmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint8m8_t test_vmul_vx_i8m8(vint8m8_t op1, int8_t 
op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint16mf4_t test_vmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4_t test_vmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint16mf2_t test_vmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint16mf2_t test_vmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint16m1_t test_vmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint16m1_t test_vmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t 
test_vmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint16m2_t test_vmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint16m2_t test_vmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint16m4_t test_vmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint16m4_t test_vmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint16m8_t test_vmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint16m8_t test_vmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmul.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ vint32m2_t test_vmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ vint32m2_t test_vmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ vint32m4_t test_vmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ vint32m4_t test_vmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmul.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ vint32m8_t test_vmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ vint32m8_t test_vmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ vint64m1_t test_vmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ vint64m1_t test_vmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ vint64m2_t test_vmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ vint64m2_t test_vmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ vint64m4_t test_vmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // 
CHECK-RV64-LABEL: @test_vmul_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ vint64m4_t test_vmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ vint64m8_t test_vmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ vint64m8_t test_vmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmul_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ vuint8mf8_t test_vmul_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmul_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ vuint8mf8_t test_vmul_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmul_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ vuint8mf4_t test_vmul_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmul_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ 
-438,7 +438,7 @@ vuint8mf4_t test_vmul_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmul_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ vuint8mf2_t test_vmul_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmul_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ vuint8mf2_t test_vmul_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmul_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ vuint8m1_t test_vmul_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmul_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ vuint8m1_t test_vmul_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmul_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ vuint8m2_t test_vmul_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmul_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ vuint8m2_t test_vmul_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t 
test_vmul_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ vuint8m4_t test_vmul_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmul_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ vuint8m4_t test_vmul_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmul_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ vuint8m8_t test_vmul_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmul_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ vuint8m8_t test_vmul_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmul_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -537,7 +537,7 @@ vuint16mf4_t test_vmul_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vmul_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmul_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -546,7 +546,7 @@ vuint16mf4_t test_vmul_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmul_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -555,7 +555,7 @@ vuint16mf2_t test_vmul_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vmul_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmul.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmul_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -564,7 +564,7 @@ vuint16mf2_t test_vmul_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmul_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -573,7 +573,7 @@ vuint16m1_t test_vmul_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmul_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -582,7 +582,7 @@ vuint16m1_t test_vmul_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmul_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -591,7 +591,7 @@ vuint16m2_t test_vmul_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmul_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -600,7 +600,7 @@ vuint16m2_t test_vmul_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmul_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -609,7 +609,7 @@ vuint16m4_t test_vmul_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmul_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -618,7 +618,7 @@ vuint16m4_t test_vmul_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u16m8( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmul_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -627,7 +627,7 @@ vuint16m8_t test_vmul_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmul_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -636,7 +636,7 @@ vuint16m8_t test_vmul_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmul_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -645,7 +645,7 @@ vuint32mf2_t test_vmul_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vmul_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmul_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -654,7 +654,7 @@ vuint32mf2_t test_vmul_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmul_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -663,7 +663,7 @@ vuint32m1_t test_vmul_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmul_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -672,7 +672,7 @@ vuint32m1_t test_vmul_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmul_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -681,7 
+681,7 @@ vuint32m2_t test_vmul_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmul_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2_t test_vmul_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmul_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -699,7 +699,7 @@ vuint32m4_t test_vmul_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmul_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -708,7 +708,7 @@ vuint32m4_t test_vmul_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmul_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -717,7 +717,7 @@ vuint32m8_t test_vmul_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmul_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -726,7 +726,7 @@ vuint32m8_t test_vmul_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmul_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -735,7 +735,7 @@ vuint64m1_t test_vmul_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmul_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -744,7 +744,7 @@ vuint64m1_t test_vmul_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmul_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -753,7 +753,7 @@ vuint64m2_t test_vmul_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmul_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -762,7 +762,7 @@ vuint64m2_t test_vmul_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmul_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -771,7 +771,7 @@ vuint64m4_t test_vmul_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmul_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -780,7 +780,7 @@ vuint64m4_t test_vmul_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmul_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -789,7 +789,7 @@ vuint64m8_t test_vmul_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmul_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmul_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { @@ -798,7 +798,7 @@ vuint64m8_t test_vmul_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i8.nxv1i8.i64( 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulh_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -807,7 +807,7 @@ vint8mf8_t test_vmulh_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulh_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -816,7 +816,7 @@ vint8mf8_t test_vmulh_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulh_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -825,7 +825,7 @@ vint8mf4_t test_vmulh_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulh_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -834,7 +834,7 @@ vint8mf4_t test_vmulh_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulh_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -843,7 +843,7 @@ vint8mf2_t test_vmulh_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulh_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -852,7 +852,7 @@ vint8mf2_t test_vmulh_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulh_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -861,7 +861,7 @@ vint8m1_t test_vmulh_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i8m1( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulh_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -870,7 +870,7 @@ vint8m1_t test_vmulh_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulh_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -879,7 +879,7 @@ vint8m2_t test_vmulh_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulh_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -888,7 +888,7 @@ vint8m2_t test_vmulh_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulh_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -897,7 +897,7 @@ vint8m4_t test_vmulh_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulh_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -906,7 +906,7 @@ vint8m4_t test_vmulh_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulh_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -915,7 +915,7 @@ vint8m8_t test_vmulh_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulh_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -924,7 +924,7 @@ vint8m8_t test_vmulh_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // 
CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulh_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -933,7 +933,7 @@ vint16mf4_t test_vmulh_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulh_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -942,7 +942,7 @@ vint16mf4_t test_vmulh_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulh_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -951,7 +951,7 @@ vint16mf2_t test_vmulh_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulh_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -960,7 +960,7 @@ vint16mf2_t test_vmulh_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulh_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -969,7 +969,7 @@ vint16m1_t test_vmulh_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulh_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -978,7 +978,7 @@ vint16m1_t test_vmulh_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t 
test_vmulh_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -987,7 +987,7 @@ vint16m2_t test_vmulh_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulh_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -996,7 +996,7 @@ vint16m2_t test_vmulh_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulh_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -1005,7 +1005,7 @@ vint16m4_t test_vmulh_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulh_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -1014,7 +1014,7 @@ vint16m4_t test_vmulh_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulh_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -1023,7 +1023,7 @@ vint16m8_t test_vmulh_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulh_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -1032,7 +1032,7 @@ vint16m8_t test_vmulh_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulh_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -1041,7 +1041,7 @@ vint32mf2_t test_vmulh_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulh_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -1050,7 +1050,7 @@ vint32mf2_t test_vmulh_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulh_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -1059,7 +1059,7 @@ vint32m1_t test_vmulh_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulh_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -1068,7 +1068,7 @@ vint32m1_t test_vmulh_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulh_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -1077,7 +1077,7 @@ vint32m2_t test_vmulh_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulh_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -1086,7 +1086,7 @@ vint32m2_t test_vmulh_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulh_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -1095,7 +1095,7 @@ vint32m4_t test_vmulh_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulh_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -1104,7 +1104,7 @@ vint32m4_t test_vmulh_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vv_i32m8( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulh_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -1113,7 +1113,7 @@ vint32m8_t test_vmulh_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulh_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulh_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -1122,7 +1122,7 @@ vint32m8_t test_vmulh_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmulhu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1131,7 +1131,7 @@ vuint8mf8_t test_vmulhu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmulhu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -1140,7 +1140,7 @@ vuint8mf8_t test_vmulhu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmulhu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -1149,7 +1149,7 @@ vuint8mf4_t test_vmulhu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmulhu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -1158,7 +1158,7 @@ vuint8mf4_t test_vmulhu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmulhu_vv_u8mf2(vuint8mf2_t op1, 
vuint8mf2_t op2, size_t vl) { @@ -1167,7 +1167,7 @@ vuint8mf2_t test_vmulhu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmulhu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -1176,7 +1176,7 @@ vuint8mf2_t test_vmulhu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmulhu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -1185,7 +1185,7 @@ vuint8m1_t test_vmulhu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmulhu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -1194,7 +1194,7 @@ vuint8m1_t test_vmulhu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmulhu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -1203,7 +1203,7 @@ vuint8m2_t test_vmulhu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmulhu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -1212,7 +1212,7 @@ vuint8m2_t test_vmulhu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmulhu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -1221,7 +1221,7 @@ vuint8m4_t test_vmulhu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmulhu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmulhu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -1230,7 +1230,7 @@ vuint8m4_t test_vmulhu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmulhu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -1239,7 +1239,7 @@ vuint8m8_t test_vmulhu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmulhu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -1248,7 +1248,7 @@ vuint8m8_t test_vmulhu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmulhu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -1257,7 +1257,7 @@ vuint16mf4_t test_vmulhu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmulhu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -1266,7 +1266,7 @@ vuint16mf4_t test_vmulhu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmulhu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -1275,7 +1275,7 @@ vuint16mf2_t test_vmulhu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmulhu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -1284,7 +1284,7 @@ vuint16mf2_t test_vmulhu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: 
@test_vmulhu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmulhu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -1293,7 +1293,7 @@ vuint16m1_t test_vmulhu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmulhu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -1302,7 +1302,7 @@ vuint16m1_t test_vmulhu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmulhu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -1311,7 +1311,7 @@ vuint16m2_t test_vmulhu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmulhu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -1320,7 +1320,7 @@ vuint16m2_t test_vmulhu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmulhu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -1329,7 +1329,7 @@ vuint16m4_t test_vmulhu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmulhu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -1338,7 +1338,7 @@ vuint16m4_t test_vmulhu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmulhu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -1347,7 +1347,7 @@ vuint16m8_t test_vmulhu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmulhu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -1356,7 +1356,7 @@ vuint16m8_t test_vmulhu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmulhu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -1365,7 +1365,7 @@ vuint32mf2_t test_vmulhu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl // CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmulhu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -1374,7 +1374,7 @@ vuint32mf2_t test_vmulhu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmulhu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -1383,7 +1383,7 @@ vuint32m1_t test_vmulhu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmulhu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -1392,7 +1392,7 @@ vuint32m1_t test_vmulhu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmulhu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1401,7 +1401,7 @@ vuint32m2_t test_vmulhu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmulhu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -1410,7 +1410,7 @@ vuint32m2_t test_vmulhu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmulhu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1419,7 +1419,7 @@ vuint32m4_t test_vmulhu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmulhu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -1428,7 +1428,7 @@ vuint32m4_t test_vmulhu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmulhu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -1437,7 +1437,7 @@ vuint32m8_t test_vmulhu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmulhu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -1446,7 +1446,7 @@ vuint32m8_t test_vmulhu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulhsu_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1455,7 +1455,7 @@ vint8mf8_t test_vmulhsu_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t 
test_vmulhsu_vx_i8mf8(vint8mf8_t op1, uint8_t op2, size_t vl) { @@ -1464,7 +1464,7 @@ vint8mf8_t test_vmulhsu_vx_i8mf8(vint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulhsu_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -1473,7 +1473,7 @@ vint8mf4_t test_vmulhsu_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulhsu_vx_i8mf4(vint8mf4_t op1, uint8_t op2, size_t vl) { @@ -1482,7 +1482,7 @@ vint8mf4_t test_vmulhsu_vx_i8mf4(vint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulhsu_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -1491,7 +1491,7 @@ vint8mf2_t test_vmulhsu_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulhsu_vx_i8mf2(vint8mf2_t op1, uint8_t op2, size_t vl) { @@ -1500,7 +1500,7 @@ vint8mf2_t test_vmulhsu_vx_i8mf2(vint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulhsu_vv_i8m1(vint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -1509,7 +1509,7 @@ vint8m1_t test_vmulhsu_vv_i8m1(vint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulhsu_vx_i8m1(vint8m1_t op1, uint8_t op2, size_t vl) { @@ -1518,7 +1518,7 @@ vint8m1_t test_vmulhsu_vx_i8m1(vint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulhsu_vv_i8m2(vint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -1527,7 +1527,7 @@ vint8m2_t test_vmulhsu_vv_i8m2(vint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulhsu_vx_i8m2(vint8m2_t op1, uint8_t op2, size_t vl) { @@ -1536,7 +1536,7 @@ vint8m2_t test_vmulhsu_vx_i8m2(vint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulhsu_vv_i8m4(vint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -1545,7 +1545,7 @@ vint8m4_t test_vmulhsu_vv_i8m4(vint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulhsu_vx_i8m4(vint8m4_t op1, uint8_t op2, size_t vl) { @@ -1554,7 +1554,7 @@ vint8m4_t test_vmulhsu_vx_i8m4(vint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulhsu_vv_i8m8(vint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -1563,7 +1563,7 @@ vint8m8_t test_vmulhsu_vv_i8m8(vint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulhsu_vx_i8m8(vint8m8_t op1, uint8_t op2, size_t vl) { @@ -1572,7 +1572,7 @@ vint8m8_t test_vmulhsu_vx_i8m8(vint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulhsu_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -1581,7 +1581,7 @@ vint16mf4_t test_vmulhsu_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) // CHECK-RV64-LABEL: 
@test_vmulhsu_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulhsu_vx_i16mf4(vint16mf4_t op1, uint16_t op2, size_t vl) { @@ -1590,7 +1590,7 @@ vint16mf4_t test_vmulhsu_vx_i16mf4(vint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulhsu_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -1599,7 +1599,7 @@ vint16mf2_t test_vmulhsu_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulhsu_vx_i16mf2(vint16mf2_t op1, uint16_t op2, size_t vl) { @@ -1608,7 +1608,7 @@ vint16mf2_t test_vmulhsu_vx_i16mf2(vint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulhsu_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -1617,7 +1617,7 @@ vint16m1_t test_vmulhsu_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulhsu_vx_i16m1(vint16m1_t op1, uint16_t op2, size_t vl) { @@ -1626,7 +1626,7 @@ vint16m1_t test_vmulhsu_vx_i16m1(vint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulhsu_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -1635,7 +1635,7 @@ vint16m2_t test_vmulhsu_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulhsu_vx_i16m2(vint16m2_t op1, uint16_t op2, size_t vl) { @@ -1644,7 +1644,7 @@ vint16m2_t test_vmulhsu_vx_i16m2(vint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulhsu_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -1653,7 +1653,7 @@ vint16m4_t test_vmulhsu_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulhsu_vx_i16m4(vint16m4_t op1, uint16_t op2, size_t vl) { @@ -1662,7 +1662,7 @@ vint16m4_t test_vmulhsu_vx_i16m4(vint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulhsu_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -1671,7 +1671,7 @@ vint16m8_t test_vmulhsu_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulhsu_vx_i16m8(vint16m8_t op1, uint16_t op2, size_t vl) { @@ -1680,7 +1680,7 @@ vint16m8_t test_vmulhsu_vx_i16m8(vint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulhsu_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -1689,7 +1689,7 @@ vint32mf2_t test_vmulhsu_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulhsu_vx_i32mf2(vint32mf2_t op1, uint32_t op2, size_t vl) { @@ -1698,7 +1698,7 @@ vint32mf2_t test_vmulhsu_vx_i32mf2(vint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulhsu_vv_i32m1(vint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -1707,7 +1707,7 @@ vint32m1_t test_vmulhsu_vv_i32m1(vint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulhsu_vx_i32m1(vint32m1_t op1, uint32_t op2, size_t vl) { @@ -1716,7 +1716,7 @@ vint32m1_t test_vmulhsu_vx_i32m1(vint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulhsu_vv_i32m2(vint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1725,7 +1725,7 @@ vint32m2_t test_vmulhsu_vv_i32m2(vint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulhsu_vx_i32m2(vint32m2_t op1, uint32_t op2, size_t vl) { @@ -1734,7 +1734,7 @@ vint32m2_t test_vmulhsu_vx_i32m2(vint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulhsu_vv_i32m4(vint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1743,7 +1743,7 @@ vint32m4_t test_vmulhsu_vv_i32m4(vint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulhsu_vx_i32m4(vint32m4_t op1, uint32_t op2, size_t vl) { @@ -1752,7 +1752,7 @@ vint32m4_t test_vmulhsu_vx_i32m4(vint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t 
test_vmulhsu_vv_i32m8(vint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -1761,7 +1761,7 @@ vint32m8_t test_vmulhsu_vv_i32m8(vint32m8_t op1, vuint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vmulhsu_vx_i32m8(vint32m8_t op1, uint32_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnclip.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnclip.c
index 81dc0a7..582b4f3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnclip.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnclip.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vnclip_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vnclip_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vnclip_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vnclip_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vnclip_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vnclip_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vnclip_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vnclip_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vnclip_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
@@ -51,7
+51,7 @@ vint8mf2_t test_vnclip_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnclip_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vnclip_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnclip_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vnclip_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnclip_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vnclip_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnclip_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vnclip_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnclip_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vnclip_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnclip_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vnclip_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.i64.i64( [[OP1:%.*]], i64 
[[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnclip_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vnclip_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnclip_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, @@ -124,7 +124,7 @@ vint16mf4_t test_vnclip_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, // CHECK-RV64-LABEL: @test_vnclip_wx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnclip_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) { @@ -133,7 +133,7 @@ vint16mf4_t test_vnclip_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnclip_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, @@ -143,7 +143,7 @@ vint16mf2_t test_vnclip_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, // CHECK-RV64-LABEL: @test_vnclip_wx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnclip_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) { @@ -152,7 +152,7 @@ vint16mf2_t test_vnclip_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnclip_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) { @@ -161,7 +161,7 @@ vint16m1_t test_vnclip_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnclip_wx_i16m1(vint32m2_t 
op1, size_t shift, size_t vl) { @@ -170,7 +170,7 @@ vint16m1_t test_vnclip_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnclip_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) { @@ -179,7 +179,7 @@ vint16m2_t test_vnclip_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnclip_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) { @@ -188,7 +188,7 @@ vint16m2_t test_vnclip_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnclip_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) { @@ -197,7 +197,7 @@ vint16m4_t test_vnclip_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnclip_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) { @@ -206,7 +206,7 @@ vint16m4_t test_vnclip_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnclip_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, @@ -216,7 +216,7 @@ vint32mf2_t test_vnclip_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, // CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnclip_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) { @@ -225,7 +225,7 @@ vint32mf2_t test_vnclip_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnclip_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) { @@ -234,7 +234,7 @@ vint32m1_t test_vnclip_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnclip_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) { @@ -243,7 +243,7 @@ vint32m1_t test_vnclip_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnclip_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) { @@ -252,7 +252,7 @@ vint32m2_t test_vnclip_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnclip_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) { @@ -261,7 +261,7 @@ vint32m2_t test_vnclip_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnclip_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) { @@ -270,7 +270,7 @@ vint32m4_t test_vnclip_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclip_wx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnclip_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) { @@ -279,7 +279,7 @@ vint32m4_t test_vnclip_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8.i64( undef, [[OP1:%.*]], 
[[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnclipu_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, @@ -289,7 +289,7 @@ vuint8mf8_t test_vnclipu_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnclipu_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) { @@ -298,7 +298,7 @@ vuint8mf8_t test_vnclipu_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnclipu_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, @@ -308,7 +308,7 @@ vuint8mf4_t test_vnclipu_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnclipu_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) { @@ -317,7 +317,7 @@ vuint8mf4_t test_vnclipu_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnclipu_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, @@ -327,7 +327,7 @@ vuint8mf2_t test_vnclipu_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnclipu_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) { @@ -336,7 +336,7 @@ vuint8mf2_t test_vnclipu_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnclipu_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) { @@ -345,7 +345,7 @@ vuint8m1_t test_vnclipu_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) { // 
CHECK-RV64-LABEL: @test_vnclipu_wx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnclipu_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) { @@ -354,7 +354,7 @@ vuint8m1_t test_vnclipu_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnclipu_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) { @@ -363,7 +363,7 @@ vuint8m2_t test_vnclipu_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnclipu_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) { @@ -372,7 +372,7 @@ vuint8m2_t test_vnclipu_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnclipu_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) { @@ -381,7 +381,7 @@ vuint8m4_t test_vnclipu_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnclipu_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) { @@ -390,7 +390,7 @@ vuint8m4_t test_vnclipu_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnclipu_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, @@ -400,7 +400,7 @@ vuint16mf4_t test_vnclipu_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnclipu_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) { @@ -409,7 +409,7 @@ vuint16mf4_t test_vnclipu_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnclipu_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, @@ -419,7 +419,7 @@ vuint16mf2_t test_vnclipu_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnclipu_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) { @@ -428,7 +428,7 @@ vuint16mf2_t test_vnclipu_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnclipu_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift, @@ -438,7 +438,7 @@ vuint16m1_t test_vnclipu_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift, // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnclipu_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) { @@ -447,7 +447,7 @@ vuint16m1_t test_vnclipu_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnclipu_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, @@ -457,7 +457,7 @@ vuint16m2_t test_vnclipu_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnclipu_wx_u16m2(vuint32m4_t op1, size_t 
shift, size_t vl) { @@ -466,7 +466,7 @@ vuint16m2_t test_vnclipu_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnclipu_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, @@ -476,7 +476,7 @@ vuint16m4_t test_vnclipu_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnclipu_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) { @@ -485,7 +485,7 @@ vuint16m4_t test_vnclipu_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnclipu_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, @@ -495,7 +495,7 @@ vuint32mf2_t test_vnclipu_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, // CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnclipu_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) { @@ -504,7 +504,7 @@ vuint32mf2_t test_vnclipu_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnclipu_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, @@ -514,7 +514,7 @@ vuint32m1_t test_vnclipu_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnclipu_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) { @@ -523,7 +523,7 @@ vuint32m1_t test_vnclipu_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnclipu_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift, @@ -533,7 +533,7 @@ vuint32m2_t test_vnclipu_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift, // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnclipu_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) { @@ -542,7 +542,7 @@ vuint32m2_t test_vnclipu_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnclipu_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift, @@ -552,7 +552,7 @@ vuint32m4_t test_vnclipu_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift, // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnclipu_wx_u32m4(vuint64m8_t op1, size_t shift, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vncvt.c index 6bdb8ba..3b5dd62 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vncvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vncvt.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vncvt_x_x_w_i8mf8 (vint16mf4_t src, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vncvt_x_x_w_i8mf8 (vint16mf4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vncvt_x_x_w_i8mf4 (vint16mf2_t src, size_t vl) { @@ -24,7 +24,7 @@ vint8mf4_t test_vncvt_x_x_w_i8mf4 (vint16mf2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64( undef, 
[[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vncvt_x_x_w_i8mf2 (vint16m1_t src, size_t vl) { @@ -33,7 +33,7 @@ vint8mf2_t test_vncvt_x_x_w_i8mf2 (vint16m1_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vncvt_x_x_w_i8m1 (vint16m2_t src, size_t vl) { @@ -42,7 +42,7 @@ vint8m1_t test_vncvt_x_x_w_i8m1 (vint16m2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vncvt_x_x_w_i8m2 (vint16m4_t src, size_t vl) { @@ -51,7 +51,7 @@ vint8m2_t test_vncvt_x_x_w_i8m2 (vint16m4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vncvt_x_x_w_i8m4 (vint16m8_t src, size_t vl) { @@ -60,7 +60,7 @@ vint8m4_t test_vncvt_x_x_w_i8m4 (vint16m8_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vncvt_x_x_w_u8mf8 (vuint16mf4_t src, size_t vl) { @@ -69,7 +69,7 @@ vuint8mf8_t test_vncvt_x_x_w_u8mf8 (vuint16mf4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vncvt_x_x_w_u8mf4 (vuint16mf2_t src, size_t vl) { @@ -78,7 +78,7 @@ vuint8mf4_t test_vncvt_x_x_w_u8mf4 (vuint16mf2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vncvt_x_x_w_u8mf2 (vuint16m1_t src, size_t vl) { @@ -87,7 +87,7 @@ vuint8mf2_t test_vncvt_x_x_w_u8mf2 (vuint16m1_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vncvt_x_x_w_u8m1 (vuint16m2_t src, size_t vl) { @@ -96,7 +96,7 @@ vuint8m1_t test_vncvt_x_x_w_u8m1 (vuint16m2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vncvt_x_x_w_u8m2 (vuint16m4_t src, size_t vl) { @@ -105,7 +105,7 @@ vuint8m2_t test_vncvt_x_x_w_u8m2 (vuint16m4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vncvt_x_x_w_u8m4 (vuint16m8_t src, size_t vl) { @@ -114,7 +114,7 @@ vuint8m4_t test_vncvt_x_x_w_u8m4 (vuint16m8_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vncvt_x_x_w_i16mf4 (vint32mf2_t src, size_t vl) { @@ -123,7 +123,7 @@ vint16mf4_t test_vncvt_x_x_w_i16mf4 (vint32mf2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vncvt_x_x_w_i16mf2 (vint32m1_t src, size_t vl) { @@ -132,7 +132,7 @@ vint16mf2_t test_vncvt_x_x_w_i16mf2 (vint32m1_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vncvt_x_x_w_i16m1 (vint32m2_t src, size_t vl) { @@ -141,7 +141,7 @@ vint16m1_t test_vncvt_x_x_w_i16m1 (vint32m2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vncvt_x_x_w_i16m2 (vint32m4_t src, size_t vl) { @@ -150,7 +150,7 @@ vint16m2_t test_vncvt_x_x_w_i16m2 (vint32m4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vncvt_x_x_w_i16m4 (vint32m8_t src, size_t vl) { @@ -159,7 +159,7 @@ vint16m4_t test_vncvt_x_x_w_i16m4 (vint32m8_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vncvt_x_x_w_u16mf4 (vuint32mf2_t src, size_t vl) { @@ -168,7 +168,7 @@ vuint16mf4_t test_vncvt_x_x_w_u16mf4 (vuint32mf2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vncvt_x_x_w_u16mf2 (vuint32m1_t src, size_t vl) { @@ -177,7 +177,7 @@ vuint16mf2_t test_vncvt_x_x_w_u16mf2 (vuint32m1_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vncvt_x_x_w_u16m1 (vuint32m2_t src, size_t vl) { @@ -186,7 +186,7 @@ vuint16m1_t test_vncvt_x_x_w_u16m1 (vuint32m2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vncvt_x_x_w_u16m2 (vuint32m4_t src, size_t vl) { @@ -195,7 +195,7 @@ vuint16m2_t test_vncvt_x_x_w_u16m2 (vuint32m4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vncvt_x_x_w_u16m4 (vuint32m8_t src, size_t vl) { @@ -204,7 +204,7 @@ vuint16m4_t test_vncvt_x_x_w_u16m4 (vuint32m8_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vncvt_x_x_w_i32mf2 (vint64m1_t src, size_t vl) { @@ -213,7 +213,7 @@ vint32mf2_t test_vncvt_x_x_w_i32mf2 (vint64m1_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64( undef, [[SRC:%.*]], i64 0, i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vncvt_x_x_w_i32m1 (vint64m2_t src, size_t vl) { @@ -222,7 +222,7 @@ vint32m1_t test_vncvt_x_x_w_i32m1 (vint64m2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vncvt_x_x_w_i32m2 (vint64m4_t src, size_t vl) { @@ -231,7 +231,7 @@ vint32m2_t test_vncvt_x_x_w_i32m2 (vint64m4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vncvt_x_x_w_i32m4 (vint64m8_t src, size_t vl) { @@ -240,7 +240,7 @@ vint32m4_t test_vncvt_x_x_w_i32m4 (vint64m8_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vncvt_x_x_w_u32mf2 (vuint64m1_t src, size_t vl) { @@ -249,7 +249,7 @@ vuint32mf2_t test_vncvt_x_x_w_u32mf2 (vuint64m1_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vncvt_x_x_w_u32m1 (vuint64m2_t src, size_t vl) { @@ -258,7 +258,7 @@ vuint32m1_t test_vncvt_x_x_w_u32m1 (vuint64m2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vncvt_x_x_w_u32m2 (vuint64m4_t src, size_t vl) { @@ -267,7 +267,7 @@ vuint32m2_t test_vncvt_x_x_w_u32m2 (vuint64m4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vncvt_x_x_w_u32m4 (vuint64m8_t src, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vneg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vneg.c index e83e29b..8501b4c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vneg.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vneg.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vneg_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrsub.nxv1i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vneg_v_i8mf8 (vint8mf8_t op1, size_t vl) { @@ -16,7 +16,7 @@ vint8mf8_t test_vneg_v_i8mf8 (vint8mf8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vneg_v_i8mf4 (vint8mf4_t op1, size_t vl) { @@ -25,7 +25,7 @@ vint8mf4_t test_vneg_v_i8mf4 (vint8mf4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vneg_v_i8mf2 (vint8mf2_t op1, size_t vl) { @@ -34,7 +34,7 @@ vint8mf2_t test_vneg_v_i8mf2 (vint8mf2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vneg_v_i8m1 (vint8m1_t op1, size_t vl) { @@ -43,7 +43,7 @@ vint8m1_t test_vneg_v_i8m1 (vint8m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vneg_v_i8m2 (vint8m2_t op1, size_t vl) { @@ -52,7 +52,7 @@ vint8m2_t test_vneg_v_i8m2 (vint8m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vneg_v_i8m4 (vint8m4_t op1, size_t vl) { @@ -61,7 +61,7 @@ vint8m4_t test_vneg_v_i8m4 (vint8m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vneg_v_i8m8 (vint8m8_t op1, size_t vl) { @@ -70,7 +70,7 @@ vint8m8_t test_vneg_v_i8m8 (vint8m8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i64( [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vneg_v_i16mf4 (vint16mf4_t op1, size_t vl) { @@ -79,7 +79,7 @@ vint16mf4_t 
test_vneg_v_i16mf4 (vint16mf4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i64( [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vneg_v_i16mf2 (vint16mf2_t op1, size_t vl) { @@ -88,7 +88,7 @@ vint16mf2_t test_vneg_v_i16mf2 (vint16mf2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i64( [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vneg_v_i16m1 (vint16m1_t op1, size_t vl) { @@ -97,7 +97,7 @@ vint16m1_t test_vneg_v_i16m1 (vint16m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i64( [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vneg_v_i16m2 (vint16m2_t op1, size_t vl) { @@ -106,7 +106,7 @@ vint16m2_t test_vneg_v_i16m2 (vint16m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i64( [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vneg_v_i16m4 (vint16m4_t op1, size_t vl) { @@ -115,7 +115,7 @@ vint16m4_t test_vneg_v_i16m4 (vint16m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i64( [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vneg_v_i16m8 (vint16m8_t op1, size_t vl) { @@ -124,7 +124,7 @@ vint16m8_t test_vneg_v_i16m8 (vint16m8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vneg_v_i32mf2 (vint32mf2_t op1, size_t vl) { @@ -133,7 +133,7 @@ vint32mf2_t test_vneg_v_i32mf2 (vint32mf2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i64( [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vneg_v_i32m1 (vint32m1_t op1, size_t vl) { @@ -142,7 +142,7 @@ vint32m1_t test_vneg_v_i32m1 (vint32m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i64( [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vneg_v_i32m2 (vint32m2_t op1, size_t vl) { @@ -151,7 +151,7 @@ vint32m2_t test_vneg_v_i32m2 (vint32m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i64( [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vneg_v_i32m4 (vint32m4_t op1, size_t vl) { @@ -160,7 +160,7 @@ vint32m4_t test_vneg_v_i32m4 (vint32m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i64( [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vneg_v_i32m8 (vint32m8_t op1, size_t vl) { @@ -169,7 +169,7 @@ vint32m8_t test_vneg_v_i32m8 (vint32m8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i64( [[OP1:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vneg_v_i64m1 (vint64m1_t op1, size_t vl) { @@ -178,7 +178,7 @@ vint64m1_t test_vneg_v_i64m1 (vint64m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i64( [[OP1:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vneg_v_i64m2 (vint64m2_t op1, size_t vl) { @@ -187,7 +187,7 @@ vint64m2_t test_vneg_v_i64m2 (vint64m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i64.i64.i64( [[OP1:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vneg_v_i64m4 (vint64m4_t op1, size_t vl) { @@ -196,7 +196,7 @@ vint64m4_t test_vneg_v_i64m4 (vint64m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vneg_v_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i64.i64.i64( [[OP1:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vneg_v_i64m8 (vint64m8_t op1, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnot.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnot.c index 0736b3c..e8a9759 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnot.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnot.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vxor.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnot_v_i8mf8 (vint8mf8_t op1, size_t vl) { @@ -16,7 +16,7 @@ vint8mf8_t test_vnot_v_i8mf8 (vint8mf8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnot_v_i8mf4 (vint8mf4_t op1, size_t vl) { @@ -25,7 +25,7 @@ vint8mf4_t test_vnot_v_i8mf4 (vint8mf4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnot_v_i8mf2 (vint8mf2_t op1, size_t vl) { @@ -34,7 +34,7 @@ vint8mf2_t test_vnot_v_i8mf2 (vint8mf2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnot_v_i8m1 (vint8m1_t op1, size_t vl) { @@ -43,7 +43,7 @@ vint8m1_t test_vnot_v_i8m1 (vint8m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnot_v_i8m2 (vint8m2_t op1, size_t vl) { @@ -52,7 +52,7 @@ vint8m2_t test_vnot_v_i8m2 (vint8m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnot_v_i8m4 (vint8m4_t op1, size_t vl) { @@ -61,7 +61,7 @@ vint8m4_t test_vnot_v_i8m4 (vint8m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnot_v_i8m8 (vint8m8_t op1, size_t vl) { @@ -70,7 +70,7 @@ vint8m8_t test_vnot_v_i8m8 (vint8m8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnot_v_i16mf4 (vint16mf4_t op1, size_t vl) { @@ -79,7 +79,7 @@ vint16mf4_t test_vnot_v_i16mf4 (vint16mf4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i16mf2( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnot_v_i16mf2 (vint16mf2_t op1, size_t vl) { @@ -88,7 +88,7 @@ vint16mf2_t test_vnot_v_i16mf2 (vint16mf2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnot_v_i16m1 (vint16m1_t op1, size_t vl) { @@ -97,7 +97,7 @@ vint16m1_t test_vnot_v_i16m1 (vint16m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnot_v_i16m2 (vint16m2_t op1, size_t vl) { @@ -106,7 +106,7 @@ vint16m2_t test_vnot_v_i16m2 (vint16m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnot_v_i16m4 (vint16m4_t op1, size_t vl) { @@ -115,7 +115,7 @@ vint16m4_t test_vnot_v_i16m4 (vint16m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnot_v_i16m8 (vint16m8_t op1, size_t vl) { @@ -124,7 +124,7 @@ vint16m8_t test_vnot_v_i16m8 (vint16m8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnot_v_i32mf2 (vint32mf2_t op1, size_t vl) { @@ -133,7 +133,7 @@ vint32mf2_t test_vnot_v_i32mf2 (vint32mf2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnot_v_i32m1 (vint32m1_t op1, size_t vl) { @@ -142,7 +142,7 @@ vint32m1_t test_vnot_v_i32m1 (vint32m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnot_v_i32m2 (vint32m2_t op1, size_t vl) { @@ -151,7 +151,7 @@ vint32m2_t test_vnot_v_i32m2 (vint32m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnot_v_i32m4 (vint32m4_t op1, size_t vl) { @@ -160,7 +160,7 @@ vint32m4_t test_vnot_v_i32m4 (vint32m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnot_v_i32m8 (vint32m8_t op1, size_t vl) { @@ -169,7 +169,7 @@ vint32m8_t test_vnot_v_i32m8 (vint32m8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnot_v_i64m1 (vint64m1_t op1, size_t vl) { @@ -178,7 +178,7 @@ vint64m1_t test_vnot_v_i64m1 (vint64m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnot_v_i64m2 (vint64m2_t op1, size_t vl) { @@ -187,7 +187,7 @@ vint64m2_t test_vnot_v_i64m2 (vint64m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnot_v_i64m4 (vint64m4_t op1, size_t vl) { @@ -196,7 +196,7 @@ vint64m4_t test_vnot_v_i64m4 (vint64m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnot_v_i64m8 (vint64m8_t op1, size_t vl) { @@ -205,7 +205,7 @@ vint64m8_t test_vnot_v_i64m8 (vint64m8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnot_v_u8mf8 (vuint8mf8_t op1, size_t vl) { @@ -214,7 +214,7 @@ vuint8mf8_t test_vnot_v_u8mf8 (vuint8mf8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u8mf4( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnot_v_u8mf4 (vuint8mf4_t op1, size_t vl) { @@ -223,7 +223,7 @@ vuint8mf4_t test_vnot_v_u8mf4 (vuint8mf4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnot_v_u8mf2 (vuint8mf2_t op1, size_t vl) { @@ -232,7 +232,7 @@ vuint8mf2_t test_vnot_v_u8mf2 (vuint8mf2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnot_v_u8m1 (vuint8m1_t op1, size_t vl) { @@ -241,7 +241,7 @@ vuint8m1_t test_vnot_v_u8m1 (vuint8m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnot_v_u8m2 (vuint8m2_t op1, size_t vl) { @@ -250,7 +250,7 @@ vuint8m2_t test_vnot_v_u8m2 (vuint8m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnot_v_u8m4 (vuint8m4_t op1, size_t vl) { @@ -259,7 +259,7 @@ vuint8m4_t test_vnot_v_u8m4 (vuint8m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnot_v_u8m8 (vuint8m8_t op1, size_t vl) { @@ -268,7 +268,7 @@ vuint8m8_t test_vnot_v_u8m8 (vuint8m8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnot_v_u16mf4 (vuint16mf4_t op1, size_t vl) { @@ -277,7 +277,7 @@ vuint16mf4_t test_vnot_v_u16mf4 (vuint16mf4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnot_v_u16mf2 (vuint16mf2_t op1, size_t vl) { @@ -286,7 +286,7 @@ vuint16mf2_t test_vnot_v_u16mf2 (vuint16mf2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnot_v_u16m1 (vuint16m1_t op1, size_t vl) { @@ -295,7 +295,7 @@ vuint16m1_t test_vnot_v_u16m1 (vuint16m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnot_v_u16m2 (vuint16m2_t op1, size_t vl) { @@ -304,7 +304,7 @@ vuint16m2_t test_vnot_v_u16m2 (vuint16m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnot_v_u16m4 (vuint16m4_t op1, size_t vl) { @@ -313,7 +313,7 @@ vuint16m4_t test_vnot_v_u16m4 (vuint16m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnot_v_u16m8 (vuint16m8_t op1, size_t vl) { @@ -322,7 +322,7 @@ vuint16m8_t test_vnot_v_u16m8 (vuint16m8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnot_v_u32mf2 (vuint32mf2_t op1, size_t vl) { @@ -331,7 +331,7 @@ vuint32mf2_t test_vnot_v_u32mf2 (vuint32mf2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnot_v_u32m1 (vuint32m1_t op1, size_t vl) { @@ -340,7 +340,7 @@ vuint32m1_t test_vnot_v_u32m1 (vuint32m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnot_v_u32m2 (vuint32m2_t op1, size_t vl) { @@ -349,7 +349,7 @@ vuint32m2_t test_vnot_v_u32m2 (vuint32m2_t op1, size_t vl) { // CHECK-RV64-LABEL: 
@test_vnot_v_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnot_v_u32m4 (vuint32m4_t op1, size_t vl) { @@ -358,7 +358,7 @@ vuint32m4_t test_vnot_v_u32m4 (vuint32m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnot_v_u32m8 (vuint32m8_t op1, size_t vl) { @@ -367,7 +367,7 @@ vuint32m8_t test_vnot_v_u32m8 (vuint32m8_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnot_v_u64m1 (vuint64m1_t op1, size_t vl) { @@ -376,7 +376,7 @@ vuint64m1_t test_vnot_v_u64m1 (vuint64m1_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnot_v_u64m2 (vuint64m2_t op1, size_t vl) { @@ -385,7 +385,7 @@ vuint64m2_t test_vnot_v_u64m2 (vuint64m2_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnot_v_u64m4 (vuint64m4_t op1, size_t vl) { @@ -394,7 +394,7 @@ vuint64m4_t test_vnot_v_u64m4 (vuint64m4_t op1, size_t vl) { // CHECK-RV64-LABEL: @test_vnot_v_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnot_v_u64m8 (vuint64m8_t op1, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsra.c index f86d535..c3cfb74 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsra.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsra.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnsra_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vnsra_wv_i8mf8(vint16mf4_t op1, 
vuint8mf8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i8.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i8.nxv1i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnsra_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) { @@ -24,7 +24,7 @@ vint8mf8_t test_vnsra_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i8.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i8.nxv2i16.nxv2i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnsra_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { @@ -33,7 +33,7 @@ vint8mf4_t test_vnsra_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i8.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i8.nxv2i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnsra_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) { @@ -42,7 +42,7 @@ vint8mf4_t test_vnsra_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i8.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i8.nxv4i16.nxv4i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnsra_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) { @@ -51,7 +51,7 @@ vint8mf2_t test_vnsra_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i8.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i8.nxv4i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnsra_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vnsra_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i8.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i8.nxv8i16.nxv8i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnsra_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vnsra_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i8.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnsra.nxv8i8.nxv8i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnsra_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vnsra_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnsra_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vnsra_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i8.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i8.nxv16i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnsra_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vnsra_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnsra_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vnsra_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv32i8.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv32i8.nxv32i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnsra_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vnsra_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i16.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i16.nxv1i32.nxv1i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnsra_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { @@ -123,7 +123,7 @@ vint16mf4_t test_vnsra_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, size_t vl) // CHECK-RV64-LABEL: @test_vnsra_wx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i16.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i16.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnsra_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) { @@ -132,7 +132,7 @@ vint16mf4_t 
test_vnsra_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i16.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i16.nxv2i32.nxv2i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnsra_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, size_t vl) { @@ -141,7 +141,7 @@ vint16mf2_t test_vnsra_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, size_t vl) // CHECK-RV64-LABEL: @test_vnsra_wx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i16.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i16.nxv2i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnsra_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) { @@ -150,7 +150,7 @@ vint16mf2_t test_vnsra_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnsra_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) { @@ -159,7 +159,7 @@ vint16m1_t test_vnsra_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i16.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i16.nxv4i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnsra_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) { @@ -168,7 +168,7 @@ vint16m1_t test_vnsra_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnsra_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) { @@ -177,7 +177,7 @@ vint16m2_t test_vnsra_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i16.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i16.nxv8i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnsra_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) { @@ -186,7 +186,7 @@ vint16m2_t test_vnsra_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16.i64( [[OP1:%.*]], 
[[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnsra_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) { @@ -195,7 +195,7 @@ vint16m4_t test_vnsra_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i16.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i16.nxv16i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnsra_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) { @@ -204,7 +204,7 @@ vint16m4_t test_vnsra_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnsra_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, size_t vl) { @@ -213,7 +213,7 @@ vint32mf2_t test_vnsra_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, size_t vl) // CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i32.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i32.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnsra_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) { @@ -222,7 +222,7 @@ vint32mf2_t test_vnsra_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnsra_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) { @@ -231,7 +231,7 @@ vint32m1_t test_vnsra_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i32.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i32.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnsra_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) { @@ -240,7 +240,7 @@ vint32m1_t test_vnsra_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t 
test_vnsra_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) { @@ -249,7 +249,7 @@ vint32m2_t test_vnsra_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i32.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i32.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnsra_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) { @@ -258,7 +258,7 @@ vint32m2_t test_vnsra_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnsra_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) { @@ -267,7 +267,7 @@ vint32m4_t test_vnsra_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsra_wx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i32.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i32.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnsra_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsrl.c index 33d3479..9274361 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsrl.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsrl.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnsrl_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) { @@ -15,7 +15,7 @@ vuint8mf8_t test_vnsrl_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnsrl_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) { @@ -24,7 +24,7 @@ vuint8mf8_t test_vnsrl_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.nxv2i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnsrl_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { @@ -33,7 +33,7 
@@ vuint8mf4_t test_vnsrl_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnsrl_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) { @@ -42,7 +42,7 @@ vuint8mf4_t test_vnsrl_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.nxv4i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnsrl_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { @@ -51,7 +51,7 @@ vuint8mf2_t test_vnsrl_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnsrl_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) { @@ -60,7 +60,7 @@ vuint8mf2_t test_vnsrl_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnsrl_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) { @@ -69,7 +69,7 @@ vuint8m1_t test_vnsrl_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnsrl_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) { @@ -78,7 +78,7 @@ vuint8m1_t test_vnsrl_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnsrl_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) { @@ -87,7 +87,7 @@ vuint8m2_t test_vnsrl_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnsrl_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) { @@ -96,7 +96,7 @@ vuint8m2_t test_vnsrl_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnsrl_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) { @@ -105,7 +105,7 @@ vuint8m4_t test_vnsrl_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnsrl_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) { @@ -114,7 +114,7 @@ vuint8m4_t test_vnsrl_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.nxv1i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnsrl_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { @@ -123,7 +123,7 @@ vuint16mf4_t test_vnsrl_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, size_t v // CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnsrl_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) { @@ -132,7 +132,7 @@ vuint16mf4_t test_vnsrl_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.nxv2i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnsrl_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { @@ -141,7 +141,7 @@ vuint16mf2_t test_vnsrl_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, size_t vl // CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t 
test_vnsrl_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) { @@ -150,7 +150,7 @@ vuint16mf2_t test_vnsrl_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnsrl_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift, size_t vl) { @@ -159,7 +159,7 @@ vuint16m1_t test_vnsrl_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnsrl_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) { @@ -168,7 +168,7 @@ vuint16m1_t test_vnsrl_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnsrl_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, size_t vl) { @@ -177,7 +177,7 @@ vuint16m2_t test_vnsrl_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnsrl_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) { @@ -186,7 +186,7 @@ vuint16m2_t test_vnsrl_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnsrl_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, size_t vl) { @@ -195,7 +195,7 @@ vuint16m4_t test_vnsrl_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnsrl_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) { @@ -204,7 +204,7 @@ vuint16m4_t test_vnsrl_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnsrl_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { @@ -213,7 +213,7 @@ vuint32mf2_t test_vnsrl_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, size_t vl // CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnsrl_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) { @@ -222,7 +222,7 @@ vuint32mf2_t test_vnsrl_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnsrl_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, size_t vl) { @@ -231,7 +231,7 @@ vuint32m1_t test_vnsrl_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnsrl_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) { @@ -240,7 +240,7 @@ vuint32m1_t test_vnsrl_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnsrl_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift, size_t vl) { @@ -249,7 +249,7 @@ vuint32m2_t test_vnsrl_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnsrl_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) { @@ -258,7 +258,7 @@ vuint32m2_t test_vnsrl_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnsrl_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift, size_t vl) { @@ -267,7 +267,7 @@ vuint32m4_t test_vnsrl_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnsrl_wx_u32m4(vuint64m8_t op1, size_t shift, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vor.c index 9563004..ee25469 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vor.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ vint8mf8_t test_vor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ vint8mf4_t test_vor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ vint8mf4_t test_vor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ vint8mf2_t test_vor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t 
op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // 
CHECK-RV64-LABEL: @test_vor_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint8m8_t test_vor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint8m8_t test_vor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint16mf4_t test_vor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4_t test_vor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint16mf2_t test_vor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint16mf2_t test_vor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ 
vint16m1_t test_vor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint16m1_t test_vor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint16m2_t test_vor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint16m2_t test_vor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint16m4_t test_vor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint16m4_t test_vor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint16m8_t test_vor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t 
test_vor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint16m8_t test_vor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ vint32m2_t test_vor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ vint32m2_t test_vor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ vint32m4_t test_vor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ vint32m4_t test_vor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ vint32m8_t test_vor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ vint32m8_t test_vor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ vint64m1_t test_vor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ vint64m1_t test_vor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ vint64m2_t test_vor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ vint64m2_t test_vor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ vint64m4_t test_vor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ vint64m4_t test_vor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ vint64m8_t test_vor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ vint64m8_t test_vor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ vuint8mf8_t test_vor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ vuint8mf8_t test_vor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.nxv2i8.i64( 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ vuint8mf4_t test_vor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ vuint8mf4_t test_vor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ vuint8mf2_t test_vor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ vuint8mf2_t test_vor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ vuint8m1_t test_vor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ vuint8m1_t test_vor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ vuint8m2_t test_vor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vor.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ vuint8m2_t test_vor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ vuint8m4_t test_vor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ vuint8m4_t test_vor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ vuint8m8_t test_vor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ vuint8m8_t test_vor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -537,7 +537,7 @@ vuint16mf4_t test_vor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -546,7 +546,7 @@ vuint16mf4_t test_vor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_u16mf2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -555,7 +555,7 @@ vuint16mf2_t test_vor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -564,7 +564,7 @@ vuint16mf2_t test_vor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -573,7 +573,7 @@ vuint16m1_t test_vor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -582,7 +582,7 @@ vuint16m1_t test_vor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -591,7 +591,7 @@ vuint16m2_t test_vor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -600,7 +600,7 @@ vuint16m2_t test_vor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -609,7 +609,7 @@ 
vuint16m4_t test_vor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -618,7 +618,7 @@ vuint16m4_t test_vor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -627,7 +627,7 @@ vuint16m8_t test_vor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -636,7 +636,7 @@ vuint16m8_t test_vor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -645,7 +645,7 @@ vuint32mf2_t test_vor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -654,7 +654,7 @@ vuint32mf2_t test_vor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -663,7 +663,7 @@ vuint32m1_t test_vor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -672,7 +672,7 @@ vuint32m1_t test_vor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -681,7 +681,7 @@ vuint32m2_t test_vor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2_t test_vor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -699,7 +699,7 @@ vuint32m4_t test_vor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -708,7 +708,7 @@ vuint32m4_t test_vor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -717,7 +717,7 @@ vuint32m8_t test_vor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -726,7 +726,7 @@ vuint32m8_t test_vor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -735,7 +735,7 @@ vuint64m1_t test_vor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -744,7 +744,7 @@ vuint64m1_t test_vor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -753,7 +753,7 @@ vuint64m2_t test_vor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -762,7 +762,7 @@ vuint64m2_t test_vor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -771,7 +771,7 @@ vuint64m4_t test_vor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -780,7 +780,7 @@ vuint64m4_t test_vor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -789,7 +789,7 @@ vuint64m8_t test_vor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vor_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vor.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrem.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrem.c index 01024f1..3164f1d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrem.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrem.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vrem_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrem_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vrem_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrem_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ vint8mf8_t test_vrem_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrem_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ vint8mf4_t test_vrem_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrem_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ vint8mf4_t test_vrem_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrem_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ vint8mf2_t test_vrem_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrem_vx_i8mf2(vint8mf2_t op1, int8_t 
op2, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vrem_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrem_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vrem_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrem_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vrem_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrem_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vrem_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrem_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vrem_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrem_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vrem_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrem_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vrem_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrem_vv_i8m8(vint8m8_t op1, 
vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint8m8_t test_vrem_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrem_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint8m8_t test_vrem_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrem_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint16mf4_t test_vrem_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrem_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4_t test_vrem_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrem_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint16mf2_t test_vrem_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrem_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint16mf2_t test_vrem_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrem_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint16m1_t test_vrem_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrem_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint16m1_t test_vrem_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrem_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint16m2_t test_vrem_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrem_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint16m2_t test_vrem_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrem_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint16m4_t test_vrem_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrem_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint16m4_t test_vrem_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrem_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint16m8_t test_vrem_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrem_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint16m8_t test_vrem_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrem_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vrem_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrem_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vrem_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrem_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vrem_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrem_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vrem_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrem_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ vint32m2_t test_vrem_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrem_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ vint32m2_t test_vrem_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrem_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ vint32m4_t test_vrem_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i32m4( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrem_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ vint32m4_t test_vrem_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrem_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ vint32m8_t test_vrem_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrem_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ vint32m8_t test_vrem_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrem_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ vint64m1_t test_vrem_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrem_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ vint64m1_t test_vrem_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrem_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ vint64m2_t test_vrem_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrem_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ vint64m2_t 
test_vrem_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrem_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ vint64m4_t test_vrem_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrem_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ vint64m4_t test_vrem_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrem_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ vint64m8_t test_vrem_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrem_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrem_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ vint64m8_t test_vrem_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vremu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ vuint8mf8_t test_vremu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vremu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ vuint8mf8_t test_vremu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint8mf4_t test_vremu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ vuint8mf4_t test_vremu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vremu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ vuint8mf4_t test_vremu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vremu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ vuint8mf2_t test_vremu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vremu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ vuint8mf2_t test_vremu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vremu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ vuint8m1_t test_vremu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vremu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ vuint8m1_t test_vremu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vremu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ vuint8m2_t test_vremu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vremu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vremu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ vuint8m2_t test_vremu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vremu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ vuint8m4_t test_vremu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vremu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ vuint8m4_t test_vremu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vremu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ vuint8m8_t test_vremu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vremu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ vuint8m8_t test_vremu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vremu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -537,7 +537,7 @@ vuint16mf4_t test_vremu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vremu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vremu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -546,7 +546,7 @@ vuint16mf4_t test_vremu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vremu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vremu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -555,7 +555,7 @@ vuint16mf2_t test_vremu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vremu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vremu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -564,7 +564,7 @@ vuint16mf2_t test_vremu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vremu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -573,7 +573,7 @@ vuint16m1_t test_vremu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vremu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -582,7 +582,7 @@ vuint16m1_t test_vremu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vremu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -591,7 +591,7 @@ vuint16m2_t test_vremu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vremu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -600,7 +600,7 @@ vuint16m2_t test_vremu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vremu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -609,7 
+609,7 @@ vuint16m4_t test_vremu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vremu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -618,7 +618,7 @@ vuint16m4_t test_vremu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vremu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -627,7 +627,7 @@ vuint16m8_t test_vremu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vremu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -636,7 +636,7 @@ vuint16m8_t test_vremu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vremu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -645,7 +645,7 @@ vuint32mf2_t test_vremu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vremu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vremu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -654,7 +654,7 @@ vuint32mf2_t test_vremu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vremu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -663,7 +663,7 @@ vuint32m1_t test_vremu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vremu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vremu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -672,7 +672,7 @@ vuint32m1_t test_vremu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vremu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -681,7 +681,7 @@ vuint32m2_t test_vremu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vremu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2_t test_vremu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vremu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -699,7 +699,7 @@ vuint32m4_t test_vremu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vremu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -708,7 +708,7 @@ vuint32m4_t test_vremu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vremu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -717,7 +717,7 @@ vuint32m8_t test_vremu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vremu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -726,7 +726,7 @@ vuint32m8_t test_vremu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u64m1( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vremu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -735,7 +735,7 @@ vuint64m1_t test_vremu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vremu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -744,7 +744,7 @@ vuint64m1_t test_vremu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vremu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -753,7 +753,7 @@ vuint64m2_t test_vremu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vremu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -762,7 +762,7 @@ vuint64m2_t test_vremu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vremu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -771,7 +771,7 @@ vuint64m4_t test_vremu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vremu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -780,7 +780,7 @@ vuint64m4_t test_vremu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vremu_vv_u64m8(vuint64m8_t op1, 
vuint64m8_t op2, size_t vl) { @@ -789,7 +789,7 @@ vuint64m8_t test_vremu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vremu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vremu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrgather.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrgather.c index afc7638..6e787cd 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrgather.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrgather.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrgather_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t index, @@ -18,7 +18,7 @@ vint8mf8_t test_vrgather_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrgather_vx_i8mf8(vint8mf8_t op1, size_t index, size_t vl) { @@ -27,7 +27,7 @@ vint8mf8_t test_vrgather_vx_i8mf8(vint8mf8_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrgather_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t index, @@ -37,7 +37,7 @@ vint8mf4_t test_vrgather_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrgather_vx_i8mf4(vint8mf4_t op1, size_t index, size_t vl) { @@ -46,7 +46,7 @@ vint8mf4_t test_vrgather_vx_i8mf4(vint8mf4_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrgather_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t index, @@ -56,7 +56,7 @@ vint8mf2_t test_vrgather_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrgather_vx_i8mf2(vint8mf2_t op1, size_t index, size_t vl) { @@ -65,7 +65,7 @@ vint8mf2_t test_vrgather_vx_i8mf2(vint8mf2_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrgather_vv_i8m1(vint8m1_t op1, vuint8m1_t index, size_t vl) { @@ -74,7 +74,7 @@ vint8m1_t test_vrgather_vv_i8m1(vint8m1_t op1, vuint8m1_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrgather_vx_i8m1(vint8m1_t op1, size_t index, size_t vl) { @@ -83,7 +83,7 @@ vint8m1_t test_vrgather_vx_i8m1(vint8m1_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrgather_vv_i8m2(vint8m2_t op1, vuint8m2_t index, size_t vl) { @@ -92,7 +92,7 @@ vint8m2_t test_vrgather_vv_i8m2(vint8m2_t op1, vuint8m2_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrgather_vx_i8m2(vint8m2_t op1, size_t index, size_t vl) { @@ -101,7 +101,7 @@ vint8m2_t test_vrgather_vx_i8m2(vint8m2_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrgather_vv_i8m4(vint8m4_t op1, vuint8m4_t index, size_t vl) { @@ -110,7 +110,7 @@ vint8m4_t test_vrgather_vv_i8m4(vint8m4_t op1, vuint8m4_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrgather_vx_i8m4(vint8m4_t op1, size_t index, size_t vl) { @@ 
-119,7 +119,7 @@ vint8m4_t test_vrgather_vx_i8m4(vint8m4_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv64i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv64i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrgather_vv_i8m8(vint8m8_t op1, vuint8m8_t index, size_t vl) { @@ -128,7 +128,7 @@ vint8m8_t test_vrgather_vv_i8m8(vint8m8_t op1, vuint8m8_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv64i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv64i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrgather_vx_i8m8(vint8m8_t op1, size_t index, size_t vl) { @@ -137,7 +137,7 @@ vint8m8_t test_vrgather_vx_i8m8(vint8m8_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrgather_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t index, @@ -147,7 +147,7 @@ vint16mf4_t test_vrgather_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrgather_vx_i16mf4(vint16mf4_t op1, size_t index, size_t vl) { @@ -156,7 +156,7 @@ vint16mf4_t test_vrgather_vx_i16mf4(vint16mf4_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrgather_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t index, @@ -166,7 +166,7 @@ vint16mf2_t test_vrgather_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrgather_vx_i16mf2(vint16mf2_t op1, size_t index, size_t vl) { @@ -175,7 +175,7 @@ vint16mf2_t test_vrgather_vx_i16mf2(vint16mf2_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrgather.vv.nxv4i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrgather_vv_i16m1(vint16m1_t op1, vuint16m1_t index, @@ -185,7 +185,7 @@ vint16m1_t test_vrgather_vv_i16m1(vint16m1_t op1, vuint16m1_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrgather_vx_i16m1(vint16m1_t op1, size_t index, size_t vl) { @@ -194,7 +194,7 @@ vint16m1_t test_vrgather_vx_i16m1(vint16m1_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrgather_vv_i16m2(vint16m2_t op1, vuint16m2_t index, @@ -204,7 +204,7 @@ vint16m2_t test_vrgather_vv_i16m2(vint16m2_t op1, vuint16m2_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrgather_vx_i16m2(vint16m2_t op1, size_t index, size_t vl) { @@ -213,7 +213,7 @@ vint16m2_t test_vrgather_vx_i16m2(vint16m2_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrgather_vv_i16m4(vint16m4_t op1, vuint16m4_t index, @@ -223,7 +223,7 @@ vint16m4_t test_vrgather_vv_i16m4(vint16m4_t op1, vuint16m4_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrgather_vx_i16m4(vint16m4_t op1, size_t index, size_t vl) { @@ -232,7 +232,7 @@ vint16m4_t test_vrgather_vx_i16m4(vint16m4_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrgather_vv_i16m8(vint16m8_t op1, vuint16m8_t index, @@ -242,7 +242,7 @@ vint16m8_t test_vrgather_vv_i16m8(vint16m8_t op1, vuint16m8_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_i16m8( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrgather_vx_i16m8(vint16m8_t op1, size_t index, size_t vl) { @@ -251,7 +251,7 @@ vint16m8_t test_vrgather_vx_i16m8(vint16m8_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrgather_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t index, @@ -261,7 +261,7 @@ vint32mf2_t test_vrgather_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrgather_vx_i32mf2(vint32mf2_t op1, size_t index, size_t vl) { @@ -270,7 +270,7 @@ vint32mf2_t test_vrgather_vx_i32mf2(vint32mf2_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrgather_vv_i32m1(vint32m1_t op1, vuint32m1_t index, @@ -280,7 +280,7 @@ vint32m1_t test_vrgather_vv_i32m1(vint32m1_t op1, vuint32m1_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrgather_vx_i32m1(vint32m1_t op1, size_t index, size_t vl) { @@ -289,7 +289,7 @@ vint32m1_t test_vrgather_vx_i32m1(vint32m1_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrgather_vv_i32m2(vint32m2_t op1, vuint32m2_t index, @@ -299,7 +299,7 @@ vint32m2_t test_vrgather_vv_i32m2(vint32m2_t op1, vuint32m2_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrgather_vx_i32m2(vint32m2_t op1, size_t index, 
size_t vl) { @@ -308,7 +308,7 @@ vint32m2_t test_vrgather_vx_i32m2(vint32m2_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrgather_vv_i32m4(vint32m4_t op1, vuint32m4_t index, @@ -318,7 +318,7 @@ vint32m4_t test_vrgather_vv_i32m4(vint32m4_t op1, vuint32m4_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrgather_vx_i32m4(vint32m4_t op1, size_t index, size_t vl) { @@ -327,7 +327,7 @@ vint32m4_t test_vrgather_vx_i32m4(vint32m4_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrgather_vv_i32m8(vint32m8_t op1, vuint32m8_t index, @@ -337,7 +337,7 @@ vint32m8_t test_vrgather_vv_i32m8(vint32m8_t op1, vuint32m8_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrgather_vx_i32m8(vint32m8_t op1, size_t index, size_t vl) { @@ -346,7 +346,7 @@ vint32m8_t test_vrgather_vx_i32m8(vint32m8_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrgather_vv_i64m1(vint64m1_t op1, vuint64m1_t index, @@ -356,7 +356,7 @@ vint64m1_t test_vrgather_vv_i64m1(vint64m1_t op1, vuint64m1_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrgather_vx_i64m1(vint64m1_t op1, size_t index, size_t vl) { @@ -365,7 +365,7 @@ vint64m1_t test_vrgather_vx_i64m1(vint64m1_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrgather.vv.nxv2i64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrgather_vv_i64m2(vint64m2_t op1, vuint64m2_t index, @@ -375,7 +375,7 @@ vint64m2_t test_vrgather_vv_i64m2(vint64m2_t op1, vuint64m2_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrgather_vx_i64m2(vint64m2_t op1, size_t index, size_t vl) { @@ -384,7 +384,7 @@ vint64m2_t test_vrgather_vx_i64m2(vint64m2_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrgather_vv_i64m4(vint64m4_t op1, vuint64m4_t index, @@ -394,7 +394,7 @@ vint64m4_t test_vrgather_vv_i64m4(vint64m4_t op1, vuint64m4_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrgather_vx_i64m4(vint64m4_t op1, size_t index, size_t vl) { @@ -403,7 +403,7 @@ vint64m4_t test_vrgather_vx_i64m4(vint64m4_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrgather_vv_i64m8(vint64m8_t op1, vuint64m8_t index, @@ -413,7 +413,7 @@ vint64m8_t test_vrgather_vv_i64m8(vint64m8_t op1, vuint64m8_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrgather_vx_i64m8(vint64m8_t op1, size_t index, size_t vl) { @@ -422,7 +422,7 @@ vint64m8_t test_vrgather_vx_i64m8(vint64m8_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrgather_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t index, @@ -432,7 +432,7 @@ vuint8mf8_t test_vrgather_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrgather_vx_u8mf8(vuint8mf8_t op1, size_t index, size_t vl) { @@ -441,7 +441,7 @@ vuint8mf8_t test_vrgather_vx_u8mf8(vuint8mf8_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrgather_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t index, @@ -451,7 +451,7 @@ vuint8mf4_t test_vrgather_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrgather_vx_u8mf4(vuint8mf4_t op1, size_t index, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf4_t test_vrgather_vx_u8mf4(vuint8mf4_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrgather_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t index, @@ -470,7 +470,7 @@ vuint8mf2_t test_vrgather_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrgather_vx_u8mf2(vuint8mf2_t op1, size_t index, size_t vl) { @@ -479,7 +479,7 @@ vuint8mf2_t test_vrgather_vx_u8mf2(vuint8mf2_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vrgather_vv_u8m1(vuint8m1_t op1, vuint8m1_t index, size_t vl) { @@ -488,7 +488,7 @@ vuint8m1_t test_vrgather_vv_u8m1(vuint8m1_t op1, vuint8m1_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vrgather_vx_u8m1(vuint8m1_t op1, size_t 
index, size_t vl) { @@ -497,7 +497,7 @@ vuint8m1_t test_vrgather_vx_u8m1(vuint8m1_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vrgather_vv_u8m2(vuint8m2_t op1, vuint8m2_t index, size_t vl) { @@ -506,7 +506,7 @@ vuint8m2_t test_vrgather_vv_u8m2(vuint8m2_t op1, vuint8m2_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vrgather_vx_u8m2(vuint8m2_t op1, size_t index, size_t vl) { @@ -515,7 +515,7 @@ vuint8m2_t test_vrgather_vx_u8m2(vuint8m2_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrgather_vv_u8m4(vuint8m4_t op1, vuint8m4_t index, size_t vl) { @@ -524,7 +524,7 @@ vuint8m4_t test_vrgather_vv_u8m4(vuint8m4_t op1, vuint8m4_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrgather_vx_u8m4(vuint8m4_t op1, size_t index, size_t vl) { @@ -533,7 +533,7 @@ vuint8m4_t test_vrgather_vx_u8m4(vuint8m4_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv64i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv64i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vrgather_vv_u8m8(vuint8m8_t op1, vuint8m8_t index, size_t vl) { @@ -542,7 +542,7 @@ vuint8m8_t test_vrgather_vv_u8m8(vuint8m8_t op1, vuint8m8_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv64i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv64i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vrgather_vx_u8m8(vuint8m8_t op1, size_t index, size_t vl) { @@ -551,7 +551,7 @@ vuint8m8_t test_vrgather_vx_u8m8(vuint8m8_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrgather_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t index, @@ -561,7 +561,7 @@ vuint16mf4_t test_vrgather_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrgather_vx_u16mf4(vuint16mf4_t op1, size_t index, @@ -571,7 +571,7 @@ vuint16mf4_t test_vrgather_vx_u16mf4(vuint16mf4_t op1, size_t index, // CHECK-RV64-LABEL: @test_vrgather_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrgather_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t index, @@ -581,7 +581,7 @@ vuint16mf2_t test_vrgather_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrgather_vx_u16mf2(vuint16mf2_t op1, size_t index, @@ -591,7 +591,7 @@ vuint16mf2_t test_vrgather_vx_u16mf2(vuint16mf2_t op1, size_t index, // CHECK-RV64-LABEL: @test_vrgather_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrgather_vv_u16m1(vuint16m1_t op1, vuint16m1_t index, @@ -601,7 +601,7 @@ vuint16m1_t test_vrgather_vv_u16m1(vuint16m1_t op1, vuint16m1_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrgather_vx_u16m1(vuint16m1_t op1, size_t index, size_t vl) { @@ -610,7 +610,7 @@ vuint16m1_t test_vrgather_vx_u16m1(vuint16m1_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrgather_vv_u16m2(vuint16m2_t op1, vuint16m2_t index, @@ -620,7 +620,7 @@ vuint16m2_t test_vrgather_vv_u16m2(vuint16m2_t op1, vuint16m2_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_u16m2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrgather_vx_u16m2(vuint16m2_t op1, size_t index, size_t vl) { @@ -629,7 +629,7 @@ vuint16m2_t test_vrgather_vx_u16m2(vuint16m2_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrgather_vv_u16m4(vuint16m4_t op1, vuint16m4_t index, @@ -639,7 +639,7 @@ vuint16m4_t test_vrgather_vv_u16m4(vuint16m4_t op1, vuint16m4_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrgather_vx_u16m4(vuint16m4_t op1, size_t index, size_t vl) { @@ -648,7 +648,7 @@ vuint16m4_t test_vrgather_vx_u16m4(vuint16m4_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vrgather_vv_u16m8(vuint16m8_t op1, vuint16m8_t index, @@ -658,7 +658,7 @@ vuint16m8_t test_vrgather_vv_u16m8(vuint16m8_t op1, vuint16m8_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vrgather_vx_u16m8(vuint16m8_t op1, size_t index, size_t vl) { @@ -667,7 +667,7 @@ vuint16m8_t test_vrgather_vx_u16m8(vuint16m8_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vrgather_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t index, @@ -677,7 +677,7 @@ vuint32mf2_t test_vrgather_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t 
test_vrgather_vx_u32mf2(vuint32mf2_t op1, size_t index, @@ -687,7 +687,7 @@ vuint32mf2_t test_vrgather_vx_u32mf2(vuint32mf2_t op1, size_t index, // CHECK-RV64-LABEL: @test_vrgather_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrgather_vv_u32m1(vuint32m1_t op1, vuint32m1_t index, @@ -697,7 +697,7 @@ vuint32m1_t test_vrgather_vv_u32m1(vuint32m1_t op1, vuint32m1_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrgather_vx_u32m1(vuint32m1_t op1, size_t index, size_t vl) { @@ -706,7 +706,7 @@ vuint32m1_t test_vrgather_vx_u32m1(vuint32m1_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vrgather_vv_u32m2(vuint32m2_t op1, vuint32m2_t index, @@ -716,7 +716,7 @@ vuint32m2_t test_vrgather_vv_u32m2(vuint32m2_t op1, vuint32m2_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vrgather_vx_u32m2(vuint32m2_t op1, size_t index, size_t vl) { @@ -725,7 +725,7 @@ vuint32m2_t test_vrgather_vx_u32m2(vuint32m2_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrgather_vv_u32m4(vuint32m4_t op1, vuint32m4_t index, @@ -735,7 +735,7 @@ vuint32m4_t test_vrgather_vv_u32m4(vuint32m4_t op1, vuint32m4_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrgather_vx_u32m4(vuint32m4_t op1, size_t index, size_t vl) { @@ -744,7 +744,7 @@ vuint32m4_t test_vrgather_vx_u32m4(vuint32m4_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrgather_vv_u32m8(vuint32m8_t op1, vuint32m8_t index, @@ -754,7 +754,7 @@ vuint32m8_t test_vrgather_vv_u32m8(vuint32m8_t op1, vuint32m8_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrgather_vx_u32m8(vuint32m8_t op1, size_t index, size_t vl) { @@ -763,7 +763,7 @@ vuint32m8_t test_vrgather_vx_u32m8(vuint32m8_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrgather_vv_u64m1(vuint64m1_t op1, vuint64m1_t index, @@ -773,7 +773,7 @@ vuint64m1_t test_vrgather_vv_u64m1(vuint64m1_t op1, vuint64m1_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrgather_vx_u64m1(vuint64m1_t op1, size_t index, size_t vl) { @@ -782,7 +782,7 @@ vuint64m1_t test_vrgather_vx_u64m1(vuint64m1_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vrgather_vv_u64m2(vuint64m2_t op1, vuint64m2_t index, @@ -792,7 +792,7 @@ vuint64m2_t test_vrgather_vv_u64m2(vuint64m2_t op1, vuint64m2_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vrgather_vx_u64m2(vuint64m2_t op1, size_t index, size_t vl) { @@ -801,7 +801,7 @@ vuint64m2_t test_vrgather_vx_u64m2(vuint64m2_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vrgather_vv_u64m4(vuint64m4_t op1, vuint64m4_t index, @@ -811,7 +811,7 @@ vuint64m4_t test_vrgather_vv_u64m4(vuint64m4_t op1, vuint64m4_t index, // CHECK-RV64-LABEL: 
@test_vrgather_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vrgather_vx_u64m4(vuint64m4_t op1, size_t index, size_t vl) { @@ -820,7 +820,7 @@ vuint64m4_t test_vrgather_vx_u64m4(vuint64m4_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vrgather_vv_u64m8(vuint64m8_t op1, vuint64m8_t index, @@ -830,7 +830,7 @@ vuint64m8_t test_vrgather_vv_u64m8(vuint64m8_t op1, vuint64m8_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vrgather_vx_u64m8(vuint64m8_t op1, size_t index, size_t vl) { @@ -839,7 +839,7 @@ vuint64m8_t test_vrgather_vx_u64m8(vuint64m8_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1f32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1f32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vrgather_vv_f32mf2(vfloat32mf2_t op1, vuint32mf2_t index, @@ -849,7 +849,7 @@ vfloat32mf2_t test_vrgather_vv_f32mf2(vfloat32mf2_t op1, vuint32mf2_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1f32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1f32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vrgather_vx_f32mf2(vfloat32mf2_t op1, size_t index, @@ -859,7 +859,7 @@ vfloat32mf2_t test_vrgather_vx_f32mf2(vfloat32mf2_t op1, size_t index, // CHECK-RV64-LABEL: @test_vrgather_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2f32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2f32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vrgather_vv_f32m1(vfloat32m1_t op1, vuint32m1_t index, @@ -869,7 +869,7 @@ vfloat32m1_t test_vrgather_vv_f32m1(vfloat32m1_t op1, vuint32m1_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2f32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2f32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat32m1_t test_vrgather_vx_f32m1(vfloat32m1_t op1, size_t index, size_t vl) { @@ -878,7 +878,7 @@ vfloat32m1_t test_vrgather_vx_f32m1(vfloat32m1_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4f32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4f32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vrgather_vv_f32m2(vfloat32m2_t op1, vuint32m2_t index, @@ -888,7 +888,7 @@ vfloat32m2_t test_vrgather_vv_f32m2(vfloat32m2_t op1, vuint32m2_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4f32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4f32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vrgather_vx_f32m2(vfloat32m2_t op1, size_t index, size_t vl) { @@ -897,7 +897,7 @@ vfloat32m2_t test_vrgather_vx_f32m2(vfloat32m2_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8f32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8f32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vrgather_vv_f32m4(vfloat32m4_t op1, vuint32m4_t index, @@ -907,7 +907,7 @@ vfloat32m4_t test_vrgather_vv_f32m4(vfloat32m4_t op1, vuint32m4_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8f32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8f32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vrgather_vx_f32m4(vfloat32m4_t op1, size_t index, size_t vl) { @@ -916,7 +916,7 @@ vfloat32m4_t test_vrgather_vx_f32m4(vfloat32m4_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16f32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16f32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vrgather_vv_f32m8(vfloat32m8_t op1, vuint32m8_t index, @@ -926,7 +926,7 @@ vfloat32m8_t test_vrgather_vv_f32m8(vfloat32m8_t op1, vuint32m8_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16f32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16f32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vrgather_vx_f32m8(vfloat32m8_t op1, size_t index, size_t vl) { @@ -935,7 +935,7 @@ vfloat32m8_t test_vrgather_vx_f32m8(vfloat32m8_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrgather.vv.nxv1f64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1f64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vrgather_vv_f64m1(vfloat64m1_t op1, vuint64m1_t index, @@ -945,7 +945,7 @@ vfloat64m1_t test_vrgather_vv_f64m1(vfloat64m1_t op1, vuint64m1_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1f64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1f64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vrgather_vx_f64m1(vfloat64m1_t op1, size_t index, size_t vl) { @@ -954,7 +954,7 @@ vfloat64m1_t test_vrgather_vx_f64m1(vfloat64m1_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2f64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2f64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vrgather_vv_f64m2(vfloat64m2_t op1, vuint64m2_t index, @@ -964,7 +964,7 @@ vfloat64m2_t test_vrgather_vv_f64m2(vfloat64m2_t op1, vuint64m2_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2f64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2f64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vrgather_vx_f64m2(vfloat64m2_t op1, size_t index, size_t vl) { @@ -973,7 +973,7 @@ vfloat64m2_t test_vrgather_vx_f64m2(vfloat64m2_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4f64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4f64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vrgather_vv_f64m4(vfloat64m4_t op1, vuint64m4_t index, @@ -983,7 +983,7 @@ vfloat64m4_t test_vrgather_vv_f64m4(vfloat64m4_t op1, vuint64m4_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4f64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4f64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vrgather_vx_f64m4(vfloat64m4_t op1, size_t index, size_t vl) { @@ -992,7 +992,7 @@ vfloat64m4_t test_vrgather_vx_f64m4(vfloat64m4_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgather_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8f64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8f64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vrgather_vv_f64m8(vfloat64m8_t op1, vuint64m8_t index, @@ 
-1002,7 +1002,7 @@ vfloat64m8_t test_vrgather_vv_f64m8(vfloat64m8_t op1, vuint64m8_t index, // CHECK-RV64-LABEL: @test_vrgather_vx_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8f64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8f64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vrgather_vx_f64m8(vfloat64m8_t op1, size_t index, size_t vl) { @@ -1011,7 +1011,7 @@ vfloat64m8_t test_vrgather_vx_f64m8(vfloat64m8_t op1, size_t index, size_t vl) { // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrgatherei16_vv_i8mf8(vint8mf8_t op1, vuint16mf4_t op2, @@ -1021,7 +1021,7 @@ vint8mf8_t test_vrgatherei16_vv_i8mf8(vint8mf8_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrgatherei16_vv_i8mf4(vint8mf4_t op1, vuint16mf2_t op2, @@ -1031,7 +1031,7 @@ vint8mf4_t test_vrgatherei16_vv_i8mf4(vint8mf4_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrgatherei16_vv_i8mf2(vint8mf2_t op1, vuint16m1_t op2, @@ -1041,7 +1041,7 @@ vint8mf2_t test_vrgatherei16_vv_i8mf2(vint8mf2_t op1, vuint16m1_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrgatherei16_vv_i8m1(vint8m1_t op1, vuint16m2_t op2, size_t vl) { @@ -1050,7 +1050,7 @@ vint8m1_t test_vrgatherei16_vv_i8m1(vint8m1_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrgatherei16_vv_i8m2(vint8m2_t op1, vuint16m4_t op2, size_t vl) { @@ -1059,7 +1059,7 @@ vint8m2_t test_vrgatherei16_vv_i8m2(vint8m2_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrgatherei16_vv_i8m4(vint8m4_t op1, vuint16m8_t op2, size_t vl) { @@ -1068,7 +1068,7 @@ vint8m4_t test_vrgatherei16_vv_i8m4(vint8m4_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrgatherei16_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, @@ -1078,7 +1078,7 @@ vint16mf4_t test_vrgatherei16_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrgatherei16_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, @@ -1088,7 +1088,7 @@ vint16mf2_t test_vrgatherei16_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrgatherei16_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, @@ -1098,7 +1098,7 @@ vint16m1_t test_vrgatherei16_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrgatherei16_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, @@ -1108,7 +1108,7 @@ vint16m2_t test_vrgatherei16_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrgatherei16_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, @@ -1118,7 +1118,7 @@ vint16m4_t test_vrgatherei16_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrgatherei16_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, @@ -1128,7 +1128,7 @@ vint16m8_t test_vrgatherei16_vv_i16m8(vint16m8_t op1, 
vuint16m8_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrgatherei16_vv_i32mf2(vint32mf2_t op1, vuint16mf4_t op2, @@ -1138,7 +1138,7 @@ vint32mf2_t test_vrgatherei16_vv_i32mf2(vint32mf2_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrgatherei16_vv_i32m1(vint32m1_t op1, vuint16mf2_t op2, @@ -1148,7 +1148,7 @@ vint32m1_t test_vrgatherei16_vv_i32m1(vint32m1_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrgatherei16_vv_i32m2(vint32m2_t op1, vuint16m1_t op2, @@ -1158,7 +1158,7 @@ vint32m2_t test_vrgatherei16_vv_i32m2(vint32m2_t op1, vuint16m1_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrgatherei16_vv_i32m4(vint32m4_t op1, vuint16m2_t op2, @@ -1168,7 +1168,7 @@ vint32m4_t test_vrgatherei16_vv_i32m4(vint32m4_t op1, vuint16m2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrgatherei16_vv_i32m8(vint32m8_t op1, vuint16m4_t op2, @@ -1178,7 +1178,7 @@ vint32m8_t test_vrgatherei16_vv_i32m8(vint32m8_t op1, vuint16m4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrgatherei16_vv_i64m1(vint64m1_t op1, vuint16mf4_t op2, @@ -1188,7 +1188,7 @@ vint64m1_t test_vrgatherei16_vv_i64m1(vint64m1_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrgatherei16_vv_i64m2(vint64m2_t op1, vuint16mf2_t op2, @@ -1198,7 +1198,7 @@ vint64m2_t test_vrgatherei16_vv_i64m2(vint64m2_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrgatherei16_vv_i64m4(vint64m4_t op1, vuint16m1_t op2, @@ -1208,7 +1208,7 @@ vint64m4_t test_vrgatherei16_vv_i64m4(vint64m4_t op1, vuint16m1_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrgatherei16_vv_i64m8(vint64m8_t op1, vuint16m2_t op2, @@ -1218,7 +1218,7 @@ vint64m8_t test_vrgatherei16_vv_i64m8(vint64m8_t op1, vuint16m2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrgatherei16_vv_u8mf8(vuint8mf8_t op1, vuint16mf4_t op2, @@ -1228,7 +1228,7 @@ vuint8mf8_t test_vrgatherei16_vv_u8mf8(vuint8mf8_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrgatherei16_vv_u8mf4(vuint8mf4_t op1, vuint16mf2_t op2, @@ -1238,7 +1238,7 @@ vuint8mf4_t test_vrgatherei16_vv_u8mf4(vuint8mf4_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrgatherei16_vv_u8mf2(vuint8mf2_t op1, vuint16m1_t op2, @@ -1248,7 +1248,7 @@ vuint8mf2_t test_vrgatherei16_vv_u8mf2(vuint8mf2_t op1, vuint16m1_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vrgatherei16_vv_u8m1(vuint8m1_t op1, vuint16m2_t op2, @@ -1258,7 +1258,7 @@ vuint8m1_t test_vrgatherei16_vv_u8m1(vuint8m1_t op1, vuint16m2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrgatherei16.vv.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vrgatherei16_vv_u8m2(vuint8m2_t op1, vuint16m4_t op2, @@ -1268,7 +1268,7 @@ vuint8m2_t test_vrgatherei16_vv_u8m2(vuint8m2_t op1, vuint16m4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrgatherei16_vv_u8m4(vuint8m4_t op1, vuint16m8_t op2, @@ -1278,7 +1278,7 @@ vuint8m4_t test_vrgatherei16_vv_u8m4(vuint8m4_t op1, vuint16m8_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrgatherei16_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, @@ -1288,7 +1288,7 @@ vuint16mf4_t test_vrgatherei16_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrgatherei16_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, @@ -1298,7 +1298,7 @@ vuint16mf2_t test_vrgatherei16_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrgatherei16_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, @@ -1308,7 +1308,7 @@ vuint16m1_t test_vrgatherei16_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrgatherei16_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, @@ -1318,7 +1318,7 @@ vuint16m2_t test_vrgatherei16_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrgatherei16_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, @@ 
-1328,7 +1328,7 @@ vuint16m4_t test_vrgatherei16_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vrgatherei16_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, @@ -1338,7 +1338,7 @@ vuint16m8_t test_vrgatherei16_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vrgatherei16_vv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, @@ -1348,7 +1348,7 @@ vuint32mf2_t test_vrgatherei16_vv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrgatherei16_vv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, @@ -1358,7 +1358,7 @@ vuint32m1_t test_vrgatherei16_vv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vrgatherei16_vv_u32m2(vuint32m2_t op1, vuint16m1_t op2, @@ -1368,7 +1368,7 @@ vuint32m2_t test_vrgatherei16_vv_u32m2(vuint32m2_t op1, vuint16m1_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrgatherei16_vv_u32m4(vuint32m4_t op1, vuint16m2_t op2, @@ -1378,7 +1378,7 @@ vuint32m4_t test_vrgatherei16_vv_u32m4(vuint32m4_t op1, vuint16m2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrgatherei16_vv_u32m8(vuint32m8_t op1, vuint16m4_t op2, @@ -1388,7 +1388,7 @@ vuint32m8_t test_vrgatherei16_vv_u32m8(vuint32m8_t op1, vuint16m4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrgatherei16_vv_u64m1(vuint64m1_t op1, vuint16mf4_t op2, @@ -1398,7 +1398,7 @@ vuint64m1_t test_vrgatherei16_vv_u64m1(vuint64m1_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vrgatherei16_vv_u64m2(vuint64m2_t op1, vuint16mf2_t op2, @@ -1408,7 +1408,7 @@ vuint64m2_t test_vrgatherei16_vv_u64m2(vuint64m2_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vrgatherei16_vv_u64m4(vuint64m4_t op1, vuint16m1_t op2, @@ -1418,7 +1418,7 @@ vuint64m4_t test_vrgatherei16_vv_u64m4(vuint64m4_t op1, vuint16m1_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vrgatherei16_vv_u64m8(vuint64m8_t op1, vuint16m2_t op2, @@ -1428,7 +1428,7 @@ vuint64m8_t test_vrgatherei16_vv_u64m8(vuint64m8_t op1, vuint16m2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vrgatherei16_vv_f32mf2(vfloat32mf2_t op1, vuint16mf4_t op2, @@ -1438,7 +1438,7 @@ vfloat32mf2_t test_vrgatherei16_vv_f32mf2(vfloat32mf2_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vrgatherei16_vv_f32m1(vfloat32m1_t op1, vuint16mf2_t op2, @@ -1448,7 +1448,7 @@ vfloat32m1_t test_vrgatherei16_vv_f32m1(vfloat32m1_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vrgatherei16_vv_f32m2(vfloat32m2_t op1, vuint16m1_t op2, @@ -1458,7 +1458,7 @@ vfloat32m2_t test_vrgatherei16_vv_f32m2(vfloat32m2_t op1, vuint16m1_t 
op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vrgatherei16_vv_f32m4(vfloat32m4_t op1, vuint16m2_t op2, @@ -1468,7 +1468,7 @@ vfloat32m4_t test_vrgatherei16_vv_f32m4(vfloat32m4_t op1, vuint16m2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vrgatherei16_vv_f32m8(vfloat32m8_t op1, vuint16m4_t op2, @@ -1478,7 +1478,7 @@ vfloat32m8_t test_vrgatherei16_vv_f32m8(vfloat32m8_t op1, vuint16m4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vrgatherei16_vv_f64m1(vfloat64m1_t op1, vuint16mf4_t op2, @@ -1488,7 +1488,7 @@ vfloat64m1_t test_vrgatherei16_vv_f64m1(vfloat64m1_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vrgatherei16_vv_f64m2(vfloat64m2_t op1, vuint16mf2_t op2, @@ -1498,7 +1498,7 @@ vfloat64m2_t test_vrgatherei16_vv_f64m2(vfloat64m2_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vrgatherei16_vv_f64m4(vfloat64m4_t op1, vuint16m1_t op2, @@ -1508,7 +1508,7 @@ vfloat64m4_t test_vrgatherei16_vv_f64m4(vfloat64m4_t op1, vuint16m1_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vrgatherei16_vv_f64m8(vfloat64m8_t op1, vuint16m2_t op2, @@ -3212,7 +3212,7 @@ vfloat64m8_t test_vrgatherei16_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, // CHECK-RV64-LABEL: @test_vrgather_vv_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1f16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1f16.i64( undef, [[OP1:%.*]], 
[[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vrgather_vv_f16mf4 (vfloat16mf4_t op1, vuint16mf4_t index, size_t vl) { @@ -3221,7 +3221,7 @@ vfloat16mf4_t test_vrgather_vv_f16mf4 (vfloat16mf4_t op1, vuint16mf4_t index, si // CHECK-RV64-LABEL: @test_vrgather_vx_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1f16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1f16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vrgather_vx_f16mf4 (vfloat16mf4_t op1, size_t index, size_t vl) { @@ -3230,7 +3230,7 @@ vfloat16mf4_t test_vrgather_vx_f16mf4 (vfloat16mf4_t op1, size_t index, size_t v // CHECK-RV64-LABEL: @test_vrgather_vv_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2f16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2f16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vrgather_vv_f16mf2 (vfloat16mf2_t op1, vuint16mf2_t index, size_t vl) { @@ -3239,7 +3239,7 @@ vfloat16mf2_t test_vrgather_vv_f16mf2 (vfloat16mf2_t op1, vuint16mf2_t index, si // CHECK-RV64-LABEL: @test_vrgather_vx_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2f16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2f16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vrgather_vx_f16mf2 (vfloat16mf2_t op1, size_t index, size_t vl) { @@ -3248,7 +3248,7 @@ vfloat16mf2_t test_vrgather_vx_f16mf2 (vfloat16mf2_t op1, size_t index, size_t v // CHECK-RV64-LABEL: @test_vrgather_vv_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4f16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4f16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vrgather_vv_f16m1 (vfloat16m1_t op1, vuint16m1_t index, size_t vl) { @@ -3257,7 +3257,7 @@ vfloat16m1_t test_vrgather_vv_f16m1 (vfloat16m1_t op1, vuint16m1_t index, size_t // CHECK-RV64-LABEL: @test_vrgather_vx_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4f16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4f16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vrgather_vx_f16m1 (vfloat16m1_t op1, size_t index, size_t vl) { @@ -3266,7 +3266,7 @@ vfloat16m1_t test_vrgather_vx_f16m1 (vfloat16m1_t op1, size_t index, size_t vl) // CHECK-RV64-LABEL: @test_vrgather_vv_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8f16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8f16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vrgather_vv_f16m2 (vfloat16m2_t op1, vuint16m2_t index, size_t vl) { @@ -3275,7 +3275,7 @@ vfloat16m2_t test_vrgather_vv_f16m2 (vfloat16m2_t 
op1, vuint16m2_t index, size_t // CHECK-RV64-LABEL: @test_vrgather_vx_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8f16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8f16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vrgather_vx_f16m2 (vfloat16m2_t op1, size_t index, size_t vl) { @@ -3284,7 +3284,7 @@ vfloat16m2_t test_vrgather_vx_f16m2 (vfloat16m2_t op1, size_t index, size_t vl) // CHECK-RV64-LABEL: @test_vrgather_vv_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16f16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16f16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vrgather_vv_f16m4 (vfloat16m4_t op1, vuint16m4_t index, size_t vl) { @@ -3293,7 +3293,7 @@ vfloat16m4_t test_vrgather_vv_f16m4 (vfloat16m4_t op1, vuint16m4_t index, size_t // CHECK-RV64-LABEL: @test_vrgather_vx_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16f16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16f16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vrgather_vx_f16m4 (vfloat16m4_t op1, size_t index, size_t vl) { @@ -3302,7 +3302,7 @@ vfloat16m4_t test_vrgather_vx_f16m4 (vfloat16m4_t op1, size_t index, size_t vl) // CHECK-RV64-LABEL: @test_vrgather_vv_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32f16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32f16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vrgather_vv_f16m8 (vfloat16m8_t op1, vuint16m8_t index, size_t vl) { @@ -3311,7 +3311,7 @@ vfloat16m8_t test_vrgather_vv_f16m8 (vfloat16m8_t op1, vuint16m8_t index, size_t // CHECK-RV64-LABEL: @test_vrgather_vx_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32f16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32f16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vrgather_vx_f16m8 (vfloat16m8_t op1, size_t index, size_t vl) { @@ -3320,7 +3320,7 @@ vfloat16m8_t test_vrgather_vx_f16m8 (vfloat16m8_t op1, size_t index, size_t vl) // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vrgatherei16_vv_f16mf4 (vfloat16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -3329,7 +3329,7 @@ vfloat16mf4_t test_vrgatherei16_vv_f16mf4 (vfloat16mf4_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vrgatherei16_vv_f16mf2 (vfloat16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -3338,7 +3338,7 @@ vfloat16mf2_t test_vrgatherei16_vv_f16mf2 (vfloat16mf2_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vrgatherei16_vv_f16m1 (vfloat16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -3347,7 +3347,7 @@ vfloat16m1_t test_vrgatherei16_vv_f16m1 (vfloat16m1_t op1, vuint16m1_t op2, size // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vrgatherei16_vv_f16m2 (vfloat16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -3356,7 +3356,7 @@ vfloat16m2_t test_vrgatherei16_vv_f16m2 (vfloat16m2_t op1, vuint16m2_t op2, size // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vrgatherei16_vv_f16m4 (vfloat16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -3365,7 +3365,7 @@ vfloat16m4_t test_vrgatherei16_vv_f16m4 (vfloat16m4_t op1, vuint16m4_t op2, size // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vrgatherei16_vv_f16m8 (vfloat16m8_t op1, vuint16m8_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrsub.c index d7688d2..b5f951f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrsub.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vrsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrsub.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ vint8mf4_t test_vrsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -33,7 +33,7 @@ vint8mf2_t test_vrsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ vint8m1_t test_vrsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -51,7 +51,7 @@ vint8m2_t test_vrsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ vint8m4_t test_vrsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -69,7 +69,7 @@ vint8m8_t test_vrsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -78,7 +78,7 @@ vint16mf4_t test_vrsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -87,7 +87,7 @@ vint16mf2_t test_vrsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint16m1_t test_vrsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -105,7 +105,7 @@ vint16m2_t test_vrsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint16m4_t test_vrsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint16m8_t test_vrsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint32mf2_t test_vrsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint32m1_t test_vrsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i32m2( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint32m2_t test_vrsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint32m4_t test_vrsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint32m8_t test_vrsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint64m1_t test_vrsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint64m2_t test_vrsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint64m4_t test_vrsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint64m8_t 
test_vrsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -213,7 +213,7 @@ vuint8mf8_t test_vrsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -222,7 +222,7 @@ vuint8mf4_t test_vrsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -231,7 +231,7 @@ vuint8mf2_t test_vrsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vrsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -240,7 +240,7 @@ vuint8m1_t test_vrsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vrsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -249,7 +249,7 @@ vuint8m2_t test_vrsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -258,7 +258,7 @@ vuint8m4_t test_vrsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t 
test_vrsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -267,7 +267,7 @@ vuint8m8_t test_vrsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -276,7 +276,7 @@ vuint16mf4_t test_vrsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -285,7 +285,7 @@ vuint16mf2_t test_vrsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -294,7 +294,7 @@ vuint16m1_t test_vrsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -303,7 +303,7 @@ vuint16m2_t test_vrsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -312,7 +312,7 @@ vuint16m4_t test_vrsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vrsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -321,7 +321,7 @@ vuint16m8_t test_vrsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vrsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -330,7 +330,7 @@ vuint32mf2_t test_vrsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -339,7 +339,7 @@ vuint32m1_t test_vrsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vrsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -348,7 +348,7 @@ vuint32m2_t test_vrsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -357,7 +357,7 @@ vuint32m4_t test_vrsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -366,7 +366,7 @@ vuint32m8_t test_vrsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -375,7 +375,7 @@ vuint64m1_t test_vrsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vrsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -384,7 +384,7 @@ vuint64m2_t test_vrsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: 
@test_vrsub_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vrsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -393,7 +393,7 @@ vuint64m4_t test_vrsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vrsub_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vrsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsadd.c index abfe106..166bc19 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsadd.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vsadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ vint8mf8_t test_vsadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ vint8mf4_t test_vsadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ vint8mf4_t test_vsadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i8.nxv4i8.i64( undef, 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ vint8mf2_t test_vsadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vsadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vsadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vsadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vsadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vsadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vsadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vsadd.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vsadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint8m8_t test_vsadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint8m8_t test_vsadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint16mf4_t test_vsadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4_t test_vsadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint16mf2_t test_vsadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint16mf2_t test_vsadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vsadd.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint16m1_t test_vsadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint16m1_t test_vsadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint16m2_t test_vsadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint16m2_t test_vsadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint16m4_t test_vsadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint16m4_t test_vsadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint16m8_t 
test_vsadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint16m8_t test_vsadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vsadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vsadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vsadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vsadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ vint32m2_t test_vsadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ vint32m2_t test_vsadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ vint32m4_t test_vsadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ vint32m4_t test_vsadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ vint32m8_t test_vsadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ vint32m8_t test_vsadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ vint64m1_t test_vsadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ vint64m1_t test_vsadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ vint64m2_t test_vsadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ vint64m2_t test_vsadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ vint64m4_t test_vsadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ vint64m4_t test_vsadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ vint64m8_t test_vsadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsadd_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ vint64m8_t test_vsadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ vuint8mf8_t test_vsaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: 
@test_vsaddu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ vuint8mf8_t test_vsaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ vuint8mf4_t test_vsaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ vuint8mf4_t test_vsaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ vuint8mf2_t test_vsaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ vuint8mf2_t test_vsaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ vuint8m1_t test_vsaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsaddu_vx_u8m1(vuint8m1_t 
op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ vuint8m1_t test_vsaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ vuint8m2_t test_vsaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ vuint8m2_t test_vsaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ vuint8m4_t test_vsaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ vuint8m4_t test_vsaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ vuint8m8_t test_vsaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ vuint8m8_t test_vsaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsaddu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, @@ -538,7 +538,7 @@ vuint16mf4_t test_vsaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -547,7 +547,7 @@ vuint16mf4_t test_vsaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, @@ -557,7 +557,7 @@ vuint16mf2_t test_vsaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -566,7 +566,7 @@ vuint16mf2_t test_vsaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -575,7 +575,7 @@ vuint16m1_t test_vsaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -584,7 +584,7 @@ vuint16m1_t test_vsaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -593,7 +593,7 @@ vuint16m2_t test_vsaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -602,7 +602,7 @@ vuint16m2_t test_vsaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -611,7 +611,7 @@ vuint16m4_t test_vsaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -620,7 +620,7 @@ vuint16m4_t test_vsaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -629,7 +629,7 @@ vuint16m8_t test_vsaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -638,7 +638,7 @@ vuint16m8_t test_vsaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, @@ -648,7 +648,7 @@ vuint32mf2_t test_vsaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, // CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t 
test_vsaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -657,7 +657,7 @@ vuint32mf2_t test_vsaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -666,7 +666,7 @@ vuint32m1_t test_vsaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -675,7 +675,7 @@ vuint32m1_t test_vsaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -684,7 +684,7 @@ vuint32m2_t test_vsaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -693,7 +693,7 @@ vuint32m2_t test_vsaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -702,7 +702,7 @@ vuint32m4_t test_vsaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -711,7 +711,7 @@ vuint32m4_t test_vsaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.nxv16i32.i64( 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -720,7 +720,7 @@ vuint32m8_t test_vsaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -729,7 +729,7 @@ vuint32m8_t test_vsaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -738,7 +738,7 @@ vuint64m1_t test_vsaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -747,7 +747,7 @@ vuint64m1_t test_vsaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -756,7 +756,7 @@ vuint64m2_t test_vsaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -765,7 +765,7 @@ vuint64m2_t test_vsaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -774,7 +774,7 @@ vuint64m4_t 
test_vsaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m4(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -783,7 +783,7 @@ vuint64m4_t test_vsaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m8(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -792,7 +792,7 @@ vuint64m8_t test_vsaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m8(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1down.c
index 867fe09..26b72ea 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1down.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1down.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf8(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vslide1down_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vslide1down_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf4(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1down.nxv2i8.i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1down.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vslide1down_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf4_t test_vslide1down_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf2(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1down.nxv4i8.i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1down.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vslide1down_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf2_t test_vslide1down_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) {
// CHECK-RV64-LABEL:
@test_vslide1down_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslide1down_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) { @@ -42,7 +42,7 @@ vint8m1_t test_vslide1down_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslide1down_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) { @@ -51,7 +51,7 @@ vint8m2_t test_vslide1down_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslide1down_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) { @@ -60,7 +60,7 @@ vint8m4_t test_vslide1down_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv64i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv64i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslide1down_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) { @@ -69,7 +69,7 @@ vint8m8_t test_vslide1down_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslide1down_vx_i16mf4(vint16mf4_t src, int16_t value, @@ -79,7 +79,7 @@ vint16mf4_t test_vslide1down_vx_i16mf4(vint16mf4_t src, int16_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslide1down_vx_i16mf2(vint16mf2_t src, int16_t value, @@ -89,7 +89,7 @@ vint16mf2_t test_vslide1down_vx_i16mf2(vint16mf2_t src, int16_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i16.i16.i64( undef, [[SRC:%.*]], i16 
[[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslide1down_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) { @@ -98,7 +98,7 @@ vint16m1_t test_vslide1down_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslide1down_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) { @@ -107,7 +107,7 @@ vint16m2_t test_vslide1down_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslide1down_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) { @@ -116,7 +116,7 @@ vint16m4_t test_vslide1down_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslide1down_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) { @@ -125,7 +125,7 @@ vint16m8_t test_vslide1down_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslide1down_vx_i32mf2(vint32mf2_t src, int32_t value, @@ -135,7 +135,7 @@ vint32mf2_t test_vslide1down_vx_i32mf2(vint32mf2_t src, int32_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslide1down_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) { @@ -144,7 +144,7 @@ vint32m1_t test_vslide1down_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslide1down_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) { @@ -153,7 +153,7 @@ vint32m2_t 
test_vslide1down_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslide1down_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) { @@ -162,7 +162,7 @@ vint32m4_t test_vslide1down_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslide1down_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) { @@ -171,7 +171,7 @@ vint32m8_t test_vslide1down_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslide1down_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) { @@ -180,7 +180,7 @@ vint64m1_t test_vslide1down_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslide1down_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) { @@ -189,7 +189,7 @@ vint64m2_t test_vslide1down_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslide1down_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) { @@ -198,7 +198,7 @@ vint64m4_t test_vslide1down_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslide1down_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) { @@ -207,7 +207,7 @@ vint64m8_t test_vslide1down_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslide1down.nxv1i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslide1down_vx_u8mf8(vuint8mf8_t src, uint8_t value, @@ -217,7 +217,7 @@ vuint8mf8_t test_vslide1down_vx_u8mf8(vuint8mf8_t src, uint8_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslide1down_vx_u8mf4(vuint8mf4_t src, uint8_t value, @@ -227,7 +227,7 @@ vuint8mf4_t test_vslide1down_vx_u8mf4(vuint8mf4_t src, uint8_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslide1down_vx_u8mf2(vuint8mf2_t src, uint8_t value, @@ -237,7 +237,7 @@ vuint8mf2_t test_vslide1down_vx_u8mf2(vuint8mf2_t src, uint8_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslide1down_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) { @@ -246,7 +246,7 @@ vuint8m1_t test_vslide1down_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslide1down_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) { @@ -255,7 +255,7 @@ vuint8m2_t test_vslide1down_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslide1down_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) { @@ -264,7 +264,7 @@ vuint8m4_t test_vslide1down_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv64i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv64i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t 
test_vslide1down_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) { @@ -273,7 +273,7 @@ vuint8m8_t test_vslide1down_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslide1down_vx_u16mf4(vuint16mf4_t src, uint16_t value, @@ -283,7 +283,7 @@ vuint16mf4_t test_vslide1down_vx_u16mf4(vuint16mf4_t src, uint16_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslide1down_vx_u16mf2(vuint16mf2_t src, uint16_t value, @@ -293,7 +293,7 @@ vuint16mf2_t test_vslide1down_vx_u16mf2(vuint16mf2_t src, uint16_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslide1down_vx_u16m1(vuint16m1_t src, uint16_t value, @@ -303,7 +303,7 @@ vuint16m1_t test_vslide1down_vx_u16m1(vuint16m1_t src, uint16_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslide1down_vx_u16m2(vuint16m2_t src, uint16_t value, @@ -313,7 +313,7 @@ vuint16m2_t test_vslide1down_vx_u16m2(vuint16m2_t src, uint16_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslide1down_vx_u16m4(vuint16m4_t src, uint16_t value, @@ -323,7 +323,7 @@ vuint16m4_t test_vslide1down_vx_u16m4(vuint16m4_t src, uint16_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslide1down_vx_u16m8(vuint16m8_t src, uint16_t value, @@ -333,7 +333,7 @@ vuint16m8_t test_vslide1down_vx_u16m8(vuint16m8_t src, uint16_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslide1down.nxv1i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslide1down_vx_u32mf2(vuint32mf2_t src, uint32_t value, @@ -343,7 +343,7 @@ vuint32mf2_t test_vslide1down_vx_u32mf2(vuint32mf2_t src, uint32_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslide1down_vx_u32m1(vuint32m1_t src, uint32_t value, @@ -353,7 +353,7 @@ vuint32m1_t test_vslide1down_vx_u32m1(vuint32m1_t src, uint32_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslide1down_vx_u32m2(vuint32m2_t src, uint32_t value, @@ -363,7 +363,7 @@ vuint32m2_t test_vslide1down_vx_u32m2(vuint32m2_t src, uint32_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslide1down_vx_u32m4(vuint32m4_t src, uint32_t value, @@ -373,7 +373,7 @@ vuint32m4_t test_vslide1down_vx_u32m4(vuint32m4_t src, uint32_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslide1down_vx_u32m8(vuint32m8_t src, uint32_t value, @@ -383,7 +383,7 @@ vuint32m8_t test_vslide1down_vx_u32m8(vuint32m8_t src, uint32_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslide1down_vx_u64m1(vuint64m1_t src, uint64_t value, @@ -393,7 +393,7 @@ vuint64m1_t test_vslide1down_vx_u64m1(vuint64m1_t src, uint64_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t 
test_vslide1down_vx_u64m2(vuint64m2_t src, uint64_t value, @@ -403,7 +403,7 @@ vuint64m2_t test_vslide1down_vx_u64m2(vuint64m2_t src, uint64_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslide1down_vx_u64m4(vuint64m4_t src, uint64_t value, @@ -413,7 +413,7 @@ vuint64m4_t test_vslide1down_vx_u64m4(vuint64m4_t src, uint64_t value, // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslide1down_vx_u64m8(vuint64m8_t src, uint64_t value, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1up.c index 61d7023..b903528 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1up.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1up.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslide1up_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vslide1up_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslide1up_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) { @@ -24,7 +24,7 @@ vint8mf4_t test_vslide1up_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslide1up_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) { @@ -33,7 +33,7 @@ vint8mf2_t test_vslide1up_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslide1up_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) { @@ -42,7 +42,7 @@ vint8m1_t test_vslide1up_vx_i8m1(vint8m1_t src, 
int8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslide1up_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) { @@ -51,7 +51,7 @@ vint8m2_t test_vslide1up_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslide1up_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) { @@ -60,7 +60,7 @@ vint8m4_t test_vslide1up_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv64i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv64i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslide1up_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) { @@ -69,7 +69,7 @@ vint8m8_t test_vslide1up_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslide1up_vx_i16mf4(vint16mf4_t src, int16_t value, @@ -79,7 +79,7 @@ vint16mf4_t test_vslide1up_vx_i16mf4(vint16mf4_t src, int16_t value, // CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslide1up_vx_i16mf2(vint16mf2_t src, int16_t value, @@ -89,7 +89,7 @@ vint16mf2_t test_vslide1up_vx_i16mf2(vint16mf2_t src, int16_t value, // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslide1up_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) { @@ -98,7 +98,7 @@ vint16m1_t test_vslide1up_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i16.i16.i64( undef, [[SRC:%.*]], i16 
[[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslide1up_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) { @@ -107,7 +107,7 @@ vint16m2_t test_vslide1up_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslide1up_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) { @@ -116,7 +116,7 @@ vint16m4_t test_vslide1up_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslide1up_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) { @@ -125,7 +125,7 @@ vint16m8_t test_vslide1up_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslide1up_vx_i32mf2(vint32mf2_t src, int32_t value, @@ -135,7 +135,7 @@ vint32mf2_t test_vslide1up_vx_i32mf2(vint32mf2_t src, int32_t value, // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslide1up_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) { @@ -144,7 +144,7 @@ vint32m1_t test_vslide1up_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslide1up_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) { @@ -153,7 +153,7 @@ vint32m2_t test_vslide1up_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslide1up_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) { @@ -162,7 +162,7 @@ vint32m4_t test_vslide1up_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) { // 
CHECK-RV64-LABEL: @test_vslide1up_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslide1up_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) { @@ -171,7 +171,7 @@ vint32m8_t test_vslide1up_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslide1up_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) { @@ -180,7 +180,7 @@ vint64m1_t test_vslide1up_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslide1up_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) { @@ -189,7 +189,7 @@ vint64m2_t test_vslide1up_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslide1up_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) { @@ -198,7 +198,7 @@ vint64m4_t test_vslide1up_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslide1up_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) { @@ -207,7 +207,7 @@ vint64m8_t test_vslide1up_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslide1up_vx_u8mf8(vuint8mf8_t src, uint8_t value, size_t vl) { @@ -216,7 +216,7 @@ vuint8mf8_t test_vslide1up_vx_u8mf8(vuint8mf8_t src, uint8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslide1up.nxv2i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslide1up_vx_u8mf4(vuint8mf4_t src, uint8_t value, size_t vl) { @@ -225,7 +225,7 @@ vuint8mf4_t test_vslide1up_vx_u8mf4(vuint8mf4_t src, uint8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslide1up_vx_u8mf2(vuint8mf2_t src, uint8_t value, size_t vl) { @@ -234,7 +234,7 @@ vuint8mf2_t test_vslide1up_vx_u8mf2(vuint8mf2_t src, uint8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslide1up_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) { @@ -243,7 +243,7 @@ vuint8m1_t test_vslide1up_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslide1up_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) { @@ -252,7 +252,7 @@ vuint8m2_t test_vslide1up_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslide1up_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) { @@ -261,7 +261,7 @@ vuint8m4_t test_vslide1up_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv64i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv64i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslide1up_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) { @@ -270,7 +270,7 @@ vuint8m8_t test_vslide1up_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslide1up_vx_u16mf4(vuint16mf4_t src, uint16_t value, @@ -280,7 +280,7 @@ vuint16mf4_t test_vslide1up_vx_u16mf4(vuint16mf4_t src, 
uint16_t value, // CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslide1up_vx_u16mf2(vuint16mf2_t src, uint16_t value, @@ -290,7 +290,7 @@ vuint16mf2_t test_vslide1up_vx_u16mf2(vuint16mf2_t src, uint16_t value, // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslide1up_vx_u16m1(vuint16m1_t src, uint16_t value, @@ -300,7 +300,7 @@ vuint16m1_t test_vslide1up_vx_u16m1(vuint16m1_t src, uint16_t value, // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslide1up_vx_u16m2(vuint16m2_t src, uint16_t value, @@ -310,7 +310,7 @@ vuint16m2_t test_vslide1up_vx_u16m2(vuint16m2_t src, uint16_t value, // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslide1up_vx_u16m4(vuint16m4_t src, uint16_t value, @@ -320,7 +320,7 @@ vuint16m4_t test_vslide1up_vx_u16m4(vuint16m4_t src, uint16_t value, // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslide1up_vx_u16m8(vuint16m8_t src, uint16_t value, @@ -330,7 +330,7 @@ vuint16m8_t test_vslide1up_vx_u16m8(vuint16m8_t src, uint16_t value, // CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslide1up_vx_u32mf2(vuint32mf2_t src, uint32_t value, @@ -340,7 +340,7 @@ vuint32mf2_t test_vslide1up_vx_u32mf2(vuint32mf2_t src, uint32_t value, // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslide1up_vx_u32m1(vuint32m1_t src, uint32_t value, @@ -350,7 +350,7 @@ vuint32m1_t test_vslide1up_vx_u32m1(vuint32m1_t src, uint32_t value, // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslide1up_vx_u32m2(vuint32m2_t src, uint32_t value, @@ -360,7 +360,7 @@ vuint32m2_t test_vslide1up_vx_u32m2(vuint32m2_t src, uint32_t value, // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslide1up_vx_u32m4(vuint32m4_t src, uint32_t value, @@ -370,7 +370,7 @@ vuint32m4_t test_vslide1up_vx_u32m4(vuint32m4_t src, uint32_t value, // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslide1up_vx_u32m8(vuint32m8_t src, uint32_t value, @@ -380,7 +380,7 @@ vuint32m8_t test_vslide1up_vx_u32m8(vuint32m8_t src, uint32_t value, // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslide1up_vx_u64m1(vuint64m1_t src, uint64_t value, @@ -390,7 +390,7 @@ vuint64m1_t test_vslide1up_vx_u64m1(vuint64m1_t src, uint64_t value, // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslide1up_vx_u64m2(vuint64m2_t src, uint64_t value, @@ -400,7 +400,7 @@ vuint64m2_t test_vslide1up_vx_u64m2(vuint64m2_t src, uint64_t value, // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslide1up_vx_u64m4(vuint64m4_t src, uint64_t value, @@ -410,7 +410,7 @@ vuint64m4_t test_vslide1up_vx_u64m4(vuint64m4_t src, uint64_t value, // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i64.i64.i64( 
[[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslide1up_vx_u64m8(vuint64m8_t src, uint64_t value, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsll.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsll.c index a7f2dec..67e7033 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsll.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsll.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsll_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vsll_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsll_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) { @@ -24,7 +24,7 @@ vint8mf8_t test_vsll_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsll_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { @@ -33,7 +33,7 @@ vint8mf4_t test_vsll_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsll_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) { @@ -42,7 +42,7 @@ vint8mf4_t test_vsll_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsll_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { @@ -51,7 +51,7 @@ vint8mf2_t test_vsll_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t 
test_vsll_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vsll_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsll_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vsll_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsll_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vsll_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsll_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vsll_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsll_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vsll_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsll_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vsll_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsll_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vsll_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], 
[[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsll_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) { @@ -123,7 +123,7 @@ vint8m8_t test_vsll_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsll_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) { @@ -132,7 +132,7 @@ vint8m8_t test_vsll_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsll_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { @@ -141,7 +141,7 @@ vint16mf4_t test_vsll_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) // CHECK-RV64-LABEL: @test_vsll_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsll_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4_t test_vsll_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsll_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { @@ -159,7 +159,7 @@ vint16mf2_t test_vsll_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) // CHECK-RV64-LABEL: @test_vsll_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsll_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) { @@ -168,7 +168,7 @@ vint16mf2_t test_vsll_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsll_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) { @@ -177,7 +177,7 @@ vint16m1_t test_vsll_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsll.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsll_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) { @@ -186,7 +186,7 @@ vint16m1_t test_vsll_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsll_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) { @@ -195,7 +195,7 @@ vint16m2_t test_vsll_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsll_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) { @@ -204,7 +204,7 @@ vint16m2_t test_vsll_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsll_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) { @@ -213,7 +213,7 @@ vint16m4_t test_vsll_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsll_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) { @@ -222,7 +222,7 @@ vint16m4_t test_vsll_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsll_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) { @@ -231,7 +231,7 @@ vint16m8_t test_vsll_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsll_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) { @@ -240,7 +240,7 @@ vint16m8_t 
test_vsll_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsll_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vsll_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) // CHECK-RV64-LABEL: @test_vsll_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsll_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vsll_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsll_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vsll_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsll_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vsll_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsll_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) { @@ -285,7 +285,7 @@ vint32m2_t test_vsll_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsll_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) { @@ -294,7 +294,7 @@ vint32m2_t test_vsll_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], 
[[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsll_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) { @@ -303,7 +303,7 @@ vint32m4_t test_vsll_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsll_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) { @@ -312,7 +312,7 @@ vint32m4_t test_vsll_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsll_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) { @@ -321,7 +321,7 @@ vint32m8_t test_vsll_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsll_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) { @@ -330,7 +330,7 @@ vint32m8_t test_vsll_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsll_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) { @@ -339,7 +339,7 @@ vint64m1_t test_vsll_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsll_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) { @@ -348,7 +348,7 @@ vint64m1_t test_vsll_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsll_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) { @@ -357,7 +357,7 @@ vint64m2_t test_vsll_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsll.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsll_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) { @@ -366,7 +366,7 @@ vint64m2_t test_vsll_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsll_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) { @@ -375,7 +375,7 @@ vint64m4_t test_vsll_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsll_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) { @@ -384,7 +384,7 @@ vint64m4_t test_vsll_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsll_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) { @@ -393,7 +393,7 @@ vint64m8_t test_vsll_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsll_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) { @@ -402,7 +402,7 @@ vint64m8_t test_vsll_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsll_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { @@ -411,7 +411,7 @@ vuint8mf8_t test_vsll_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsll_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) { @@ -420,7 +420,7 @@ vuint8mf8_t 
test_vsll_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsll_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { @@ -429,7 +429,7 @@ vuint8mf4_t test_vsll_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsll_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) { @@ -438,7 +438,7 @@ vuint8mf4_t test_vsll_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsll_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { @@ -447,7 +447,7 @@ vuint8mf2_t test_vsll_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsll_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) { @@ -456,7 +456,7 @@ vuint8mf2_t test_vsll_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsll_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) { @@ -465,7 +465,7 @@ vuint8m1_t test_vsll_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsll_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) { @@ -474,7 +474,7 @@ vuint8m1_t test_vsll_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsll_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) { @@ -483,7 +483,7 @@ vuint8m2_t test_vsll_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsll_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) { @@ -492,7 +492,7 @@ vuint8m2_t test_vsll_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsll_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) { @@ -501,7 +501,7 @@ vuint8m4_t test_vsll_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsll_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) { @@ -510,7 +510,7 @@ vuint8m4_t test_vsll_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsll_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) { @@ -519,7 +519,7 @@ vuint8m8_t test_vsll_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsll_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) { @@ -528,7 +528,7 @@ vuint8m8_t test_vsll_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsll_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { @@ -537,7 +537,7 @@ vuint16mf4_t test_vsll_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl // CHECK-RV64-LABEL: @test_vsll_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsll_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) { @@ -546,7 +546,7 @@ vuint16mf4_t test_vsll_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsll_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { @@ -555,7 +555,7 @@ vuint16mf2_t test_vsll_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl // CHECK-RV64-LABEL: @test_vsll_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsll_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) { @@ -564,7 +564,7 @@ vuint16mf2_t test_vsll_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsll_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) { @@ -573,7 +573,7 @@ vuint16m1_t test_vsll_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsll_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) { @@ -582,7 +582,7 @@ vuint16m1_t test_vsll_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsll_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) { @@ -591,7 +591,7 @@ vuint16m2_t test_vsll_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsll_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) { @@ -600,7 +600,7 @@ vuint16m2_t test_vsll_vx_u16m2(vuint16m2_t op1, size_t shift, 
size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsll_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) { @@ -609,7 +609,7 @@ vuint16m4_t test_vsll_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsll_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) { @@ -618,7 +618,7 @@ vuint16m4_t test_vsll_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsll_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) { @@ -627,7 +627,7 @@ vuint16m8_t test_vsll_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsll_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) { @@ -636,7 +636,7 @@ vuint16m8_t test_vsll_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsll_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { @@ -645,7 +645,7 @@ vuint32mf2_t test_vsll_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl // CHECK-RV64-LABEL: @test_vsll_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsll_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) { @@ -654,7 +654,7 @@ vuint32mf2_t test_vsll_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsll_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) { @@ -663,7 +663,7 @@ vuint32m1_t test_vsll_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsll_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) { @@ -672,7 +672,7 @@ vuint32m1_t test_vsll_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsll_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) { @@ -681,7 +681,7 @@ vuint32m2_t test_vsll_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsll_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2_t test_vsll_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsll_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) { @@ -699,7 +699,7 @@ vuint32m4_t test_vsll_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsll_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) { @@ -708,7 +708,7 @@ vuint32m4_t test_vsll_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsll_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) { @@ -717,7 +717,7 @@ vuint32m8_t test_vsll_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsll.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsll_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) { @@ -726,7 +726,7 @@ vuint32m8_t test_vsll_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsll_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) { @@ -735,7 +735,7 @@ vuint64m1_t test_vsll_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsll_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) { @@ -744,7 +744,7 @@ vuint64m1_t test_vsll_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsll_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) { @@ -753,7 +753,7 @@ vuint64m2_t test_vsll_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsll_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) { @@ -762,7 +762,7 @@ vuint64m2_t test_vsll_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsll_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) { @@ -771,7 +771,7 @@ vuint64m4_t test_vsll_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsll_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) { @@ -780,7 +780,7 @@ vuint64m4_t 
test_vsll_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsll_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) { @@ -789,7 +789,7 @@ vuint64m8_t test_vsll_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsll_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsll_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul-eew64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul-eew64.c index 43983b1..c92c792 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul-eew64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul-eew64.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -17,7 +17,7 @@ vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -26,7 +26,7 @@ vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -35,7 +35,7 @@ vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -44,7 +44,7 @@ vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsmul.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -53,7 +53,7 @@ vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -62,7 +62,7 @@ vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -71,7 +71,7 @@ vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul.c index e1db9d5..fd821e4 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vsmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ vint8mf8_t test_vsmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint8mf4_t test_vsmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ vint8mf4_t test_vsmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ vint8mf4_t test_vsmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ vint8mf2_t test_vsmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vsmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vsmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vsmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vsmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vsmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vsmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vsmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint8m8_t test_vsmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint8m8_t test_vsmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint16mf4_t test_vsmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4_t test_vsmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint16mf2_t test_vsmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint16mf2_t test_vsmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint16m1_t test_vsmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint16m1_t test_vsmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint16m2_t test_vsmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint16m2_t test_vsmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint16m4_t test_vsmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i16m4( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint16m4_t test_vsmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint16m8_t test_vsmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint16m8_t test_vsmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vsmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vsmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vsmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vx_i32m1(vint32m1_t op1, int32_t op2, 
size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vsmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ vint32m2_t test_vsmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ vint32m2_t test_vsmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ vint32m4_t test_vsmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ vint32m4_t test_vsmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ vint32m8_t test_vsmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsmul_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsra.c index ddf1f8b..2b3352e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsra.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsra.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vsra_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsra.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vsra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) { @@ -24,7 +24,7 @@ vint8mf8_t test_vsra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { @@ -33,7 +33,7 @@ vint8mf4_t test_vsra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) { @@ -42,7 +42,7 @@ vint8mf4_t test_vsra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { @@ -51,7 +51,7 @@ vint8mf2_t test_vsra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vsra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vsra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) { 
// CHECK-RV64-LABEL: @test_vsra_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vsra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vsra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vsra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vsra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vsra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) { @@ -123,7 +123,7 @@ vint8m8_t test_vsra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv64i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv64i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) { @@ -132,7 
+132,7 @@ vint8m8_t test_vsra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { @@ -141,7 +141,7 @@ vint16mf4_t test_vsra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) // CHECK-RV64-LABEL: @test_vsra_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4_t test_vsra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { @@ -159,7 +159,7 @@ vint16mf2_t test_vsra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) // CHECK-RV64-LABEL: @test_vsra_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) { @@ -168,7 +168,7 @@ vint16mf2_t test_vsra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) { @@ -177,7 +177,7 @@ vint16m1_t test_vsra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) { @@ -186,7 +186,7 @@ vint16m1_t test_vsra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i16.nxv8i16.i64( 
undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) { @@ -195,7 +195,7 @@ vint16m2_t test_vsra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) { @@ -204,7 +204,7 @@ vint16m2_t test_vsra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) { @@ -213,7 +213,7 @@ vint16m4_t test_vsra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) { @@ -222,7 +222,7 @@ vint16m4_t test_vsra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) { @@ -231,7 +231,7 @@ vint16m8_t test_vsra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) { @@ -240,7 +240,7 @@ vint16m8_t test_vsra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vsra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) // CHECK-RV64-LABEL: @test_vsra_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vsra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vsra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vsra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) { @@ -285,7 +285,7 @@ vint32m2_t test_vsra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) { @@ -294,7 +294,7 @@ vint32m2_t test_vsra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) { @@ -303,7 +303,7 @@ vint32m4_t test_vsra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) { @@ -312,7 +312,7 @@ vint32m4_t 
test_vsra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) { @@ -321,7 +321,7 @@ vint32m8_t test_vsra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) { @@ -330,7 +330,7 @@ vint32m8_t test_vsra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) { @@ -339,7 +339,7 @@ vint64m1_t test_vsra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) { @@ -348,7 +348,7 @@ vint64m1_t test_vsra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) { @@ -357,7 +357,7 @@ vint64m2_t test_vsra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) { @@ -366,7 +366,7 @@ vint64m2_t test_vsra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsra_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 
[[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vsra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vsra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vx_i64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vsra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vsra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vv_i64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vsra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vsra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vx_i64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vsra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsrl.c
index 3150746..c7d59a7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsrl.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsrl.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vsrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
@@ -15,7 +15,7 @@ vuint8mf8_t test_vsrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vsrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
@@ -24,7 +24,7 @@ vuint8mf8_t test_vsrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf4_t test_vsrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
@@ -33,7 +33,7 @@ vuint8mf4_t test_vsrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift,
size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) { @@ -42,7 +42,7 @@ vuint8mf4_t test_vsrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { @@ -51,7 +51,7 @@ vuint8mf2_t test_vsrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) { @@ -60,7 +60,7 @@ vuint8mf2_t test_vsrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) { @@ -69,7 +69,7 @@ vuint8m1_t test_vsrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) { @@ -78,7 +78,7 @@ vuint8m1_t test_vsrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) { @@ -87,7 +87,7 @@ vuint8m2_t test_vsrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t 
test_vsrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) { @@ -96,7 +96,7 @@ vuint8m2_t test_vsrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) { @@ -105,7 +105,7 @@ vuint8m4_t test_vsrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) { @@ -114,7 +114,7 @@ vuint8m4_t test_vsrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) { @@ -123,7 +123,7 @@ vuint8m8_t test_vsrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv64i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv64i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) { @@ -132,7 +132,7 @@ vuint8m8_t test_vsrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { @@ -141,7 +141,7 @@ vuint16mf4_t test_vsrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl // CHECK-RV64-LABEL: @test_vsrl_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) { @@ -150,7 +150,7 @@ vuint16mf4_t test_vsrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { @@ -159,7 +159,7 @@ vuint16mf2_t test_vsrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl // CHECK-RV64-LABEL: @test_vsrl_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) { @@ -168,7 +168,7 @@ vuint16mf2_t test_vsrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) { @@ -177,7 +177,7 @@ vuint16m1_t test_vsrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) { @@ -186,7 +186,7 @@ vuint16m1_t test_vsrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) { @@ -195,7 +195,7 @@ vuint16m2_t test_vsrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) { @@ -204,7 +204,7 @@ vuint16m2_t test_vsrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) { @@ -213,7 +213,7 @@ vuint16m4_t test_vsrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) { // 
CHECK-RV64-LABEL: @test_vsrl_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) { @@ -222,7 +222,7 @@ vuint16m4_t test_vsrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) { @@ -231,7 +231,7 @@ vuint16m8_t test_vsrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) { @@ -240,7 +240,7 @@ vuint16m8_t test_vsrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { @@ -249,7 +249,7 @@ vuint32mf2_t test_vsrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl // CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) { @@ -258,7 +258,7 @@ vuint32mf2_t test_vsrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) { @@ -267,7 +267,7 @@ vuint32m1_t test_vsrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) { @@ -276,7 +276,7 @@ vuint32m1_t test_vsrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) { @@ -285,7 +285,7 @@ vuint32m2_t test_vsrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) { @@ -294,7 +294,7 @@ vuint32m2_t test_vsrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) { @@ -303,7 +303,7 @@ vuint32m4_t test_vsrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) { @@ -312,7 +312,7 @@ vuint32m4_t test_vsrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) { @@ -321,7 +321,7 @@ vuint32m8_t test_vsrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) { @@ -330,7 +330,7 @@ vuint32m8_t test_vsrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i64.nxv1i64.i64( 
[[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) { @@ -339,7 +339,7 @@ vuint64m1_t test_vsrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) { @@ -348,7 +348,7 @@ vuint64m1_t test_vsrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) { @@ -357,7 +357,7 @@ vuint64m2_t test_vsrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) { @@ -366,7 +366,7 @@ vuint64m2_t test_vsrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) { @@ -375,7 +375,7 @@ vuint64m4_t test_vsrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) { @@ -384,7 +384,7 @@ vuint64m4_t test_vsrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vsrl_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) { @@ -393,7 +393,7 @@ vuint64m8_t test_vsrl_vv_u64m8(vuint64m8_t 
op1, vuint64m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vsrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssra.c
index 95e9cf8..b14e0b2 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssra.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssra.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vssra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vssra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vssra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vssra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vssra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vssra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vssra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vssra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vssra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vssra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i8.i64.i64( [[OP1:%.*]], i64
[[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vssra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vssra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vssra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vssra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vssra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vssra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vssra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vv_i8m8( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) { @@ -123,7 +123,7 @@ vint8m8_t test_vssra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv64i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv64i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) { @@ -132,7 +132,7 @@ vint8m8_t test_vssra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, @@ -142,7 +142,7 @@ vint16mf4_t test_vssra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, // CHECK-RV64-LABEL: @test_vssra_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) { @@ -151,7 +151,7 @@ vint16mf4_t test_vssra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, @@ -161,7 +161,7 @@ vint16mf2_t test_vssra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, // CHECK-RV64-LABEL: @test_vssra_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2_t test_vssra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, 
size_t vl) { @@ -179,7 +179,7 @@ vint16m1_t test_vssra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) { @@ -188,7 +188,7 @@ vint16m1_t test_vssra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) { @@ -197,7 +197,7 @@ vint16m2_t test_vssra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) { @@ -206,7 +206,7 @@ vint16m2_t test_vssra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) { @@ -215,7 +215,7 @@ vint16m4_t test_vssra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) { @@ -224,7 +224,7 @@ vint16m4_t test_vssra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) { @@ -233,7 +233,7 @@ vint16m8_t test_vssra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vssra.nxv32i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) { @@ -242,7 +242,7 @@ vint16m8_t test_vssra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, @@ -252,7 +252,7 @@ vint32mf2_t test_vssra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, // CHECK-RV64-LABEL: @test_vssra_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) { @@ -261,7 +261,7 @@ vint32mf2_t test_vssra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) { @@ -270,7 +270,7 @@ vint32m1_t test_vssra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) { @@ -279,7 +279,7 @@ vint32m1_t test_vssra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) { @@ -288,7 +288,7 @@ vint32m2_t test_vssra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) { @@ -297,7 +297,7 @@ vint32m2_t test_vssra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vv_i32m4( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) { @@ -306,7 +306,7 @@ vint32m4_t test_vssra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) { @@ -315,7 +315,7 @@ vint32m4_t test_vssra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) { @@ -324,7 +324,7 @@ vint32m8_t test_vssra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) { @@ -333,7 +333,7 @@ vint32m8_t test_vssra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) { @@ -342,7 +342,7 @@ vint64m1_t test_vssra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) { @@ -351,7 +351,7 @@ vint64m1_t test_vssra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssra_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t 
test_vssra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
@@ -360,7 +360,7 @@ vint64m2_t test_vssra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vssra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
@@ -369,7 +369,7 @@ vint64m2_t test_vssra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vssra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
@@ -378,7 +378,7 @@ vint64m4_t test_vssra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vssra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
@@ -387,7 +387,7 @@ vint64m4_t test_vssra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vssra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
@@ -396,7 +396,7 @@ vint64m8_t test_vssra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vssra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssrl.c
index fccf57a..464d78c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssrl.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssrl.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vssrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
@@ -15,7 +15,7 @@ vuint8mf8_t test_vssrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
 
 //
CHECK-RV64-LABEL: @test_vssrl_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) { @@ -24,7 +24,7 @@ vuint8mf8_t test_vssrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { @@ -33,7 +33,7 @@ vuint8mf4_t test_vssrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) { @@ -42,7 +42,7 @@ vuint8mf4_t test_vssrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { @@ -51,7 +51,7 @@ vuint8mf2_t test_vssrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) { @@ -60,7 +60,7 @@ vuint8mf2_t test_vssrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) { @@ -69,7 +69,7 @@ vuint8m1_t test_vssrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t 
test_vssrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) { @@ -78,7 +78,7 @@ vuint8m1_t test_vssrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) { @@ -87,7 +87,7 @@ vuint8m2_t test_vssrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) { @@ -96,7 +96,7 @@ vuint8m2_t test_vssrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) { @@ -105,7 +105,7 @@ vuint8m4_t test_vssrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) { @@ -114,7 +114,7 @@ vuint8m4_t test_vssrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) { @@ -123,7 +123,7 @@ vuint8m8_t test_vssrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv64i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv64i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) { @@ -132,7 +132,7 @@ vuint8m8_t test_vssrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, @@ -142,7 +142,7 @@ vuint16mf4_t test_vssrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, // CHECK-RV64-LABEL: @test_vssrl_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) { @@ -151,7 +151,7 @@ vuint16mf4_t test_vssrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, @@ -161,7 +161,7 @@ vuint16mf2_t test_vssrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, // CHECK-RV64-LABEL: @test_vssrl_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) { @@ -170,7 +170,7 @@ vuint16mf2_t test_vssrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) { @@ -179,7 +179,7 @@ vuint16m1_t test_vssrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) { @@ -188,7 +188,7 @@ vuint16m1_t test_vssrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) { @@ -197,7 +197,7 @@ vuint16m2_t test_vssrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) { // 
CHECK-RV64-LABEL: @test_vssrl_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) { @@ -206,7 +206,7 @@ vuint16m2_t test_vssrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) { @@ -215,7 +215,7 @@ vuint16m4_t test_vssrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) { @@ -224,7 +224,7 @@ vuint16m4_t test_vssrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) { @@ -233,7 +233,7 @@ vuint16m8_t test_vssrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) { @@ -242,7 +242,7 @@ vuint16m8_t test_vssrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, @@ -252,7 +252,7 @@ vuint32mf2_t test_vssrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, // CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) { @@ -261,7 +261,7 @@ vuint32mf2_t test_vssrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) { @@ -270,7 +270,7 @@ vuint32m1_t test_vssrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) { @@ -279,7 +279,7 @@ vuint32m1_t test_vssrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) { @@ -288,7 +288,7 @@ vuint32m2_t test_vssrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) { @@ -297,7 +297,7 @@ vuint32m2_t test_vssrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) { @@ -306,7 +306,7 @@ vuint32m4_t test_vssrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) { @@ -315,7 +315,7 @@ vuint32m4_t test_vssrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssrl.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) { @@ -324,7 +324,7 @@ vuint32m8_t test_vssrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) { @@ -333,7 +333,7 @@ vuint32m8_t test_vssrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) { @@ -342,7 +342,7 @@ vuint64m1_t test_vssrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) { @@ -351,7 +351,7 @@ vuint64m1_t test_vssrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) { @@ -360,7 +360,7 @@ vuint64m2_t test_vssrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) { @@ -369,7 +369,7 @@ vuint64m2_t test_vssrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, 
size_t vl) { @@ -378,7 +378,7 @@ vuint64m4_t test_vssrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) { @@ -387,7 +387,7 @@ vuint64m4_t test_vssrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) { @@ -396,7 +396,7 @@ vuint64m8_t test_vssrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) { // CHECK-RV64-LABEL: @test_vssrl_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssub.c index dc16819..1ed8555c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssub.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vssub_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vssub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ vint8mf8_t test_vssub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ vint8mf4_t test_vssub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ vint8mf4_t test_vssub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ vint8mf2_t test_vssub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vssub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vssub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vssub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vssub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vssub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i8m4( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vssub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vssub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint8m8_t test_vssub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint8m8_t test_vssub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint16mf4_t test_vssub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4_t test_vssub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint16mf2_t 
test_vssub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint16mf2_t test_vssub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint16m1_t test_vssub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint16m1_t test_vssub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint16m2_t test_vssub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint16m2_t test_vssub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint16m4_t test_vssub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint16m4_t test_vssub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint16m8_t test_vssub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint16m8_t test_vssub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vssub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vssub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vssub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vssub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.nxv4i32.i64( [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ vint32m2_t test_vssub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ vint32m2_t test_vssub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ vint32m4_t test_vssub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ vint32m4_t test_vssub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ vint32m8_t test_vssub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ vint32m8_t test_vssub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ vint64m1_t test_vssub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: 
@test_vssub_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ vint64m1_t test_vssub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ vint64m2_t test_vssub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ vint64m2_t test_vssub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ vint64m4_t test_vssub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ vint64m4_t test_vssub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ vint64m8_t test_vssub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssub_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssub_vx_i64m8(vint64m8_t op1, int64_t op2, 
size_t vl) { @@ -402,7 +402,7 @@ vint64m8_t test_vssub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ vuint8mf8_t test_vssubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ vuint8mf8_t test_vssubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ vuint8mf4_t test_vssubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ vuint8mf4_t test_vssubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ vuint8mf2_t test_vssubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ vuint8mf2_t test_vssubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssubu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ vuint8m1_t test_vssubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ vuint8m1_t test_vssubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ vuint8m2_t test_vssubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ vuint8m2_t test_vssubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ vuint8m4_t test_vssubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ vuint8m4_t test_vssubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ vuint8m8_t test_vssubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vssubu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ vuint8m8_t test_vssubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, @@ -538,7 +538,7 @@ vuint16mf4_t test_vssubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vssubu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -547,7 +547,7 @@ vuint16mf4_t test_vssubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, @@ -557,7 +557,7 @@ vuint16mf2_t test_vssubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vssubu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -566,7 +566,7 @@ vuint16mf2_t test_vssubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -575,7 +575,7 @@ vuint16m1_t test_vssubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -584,7 +584,7 @@ vuint16m1_t 
test_vssubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -593,7 +593,7 @@ vuint16m2_t test_vssubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -602,7 +602,7 @@ vuint16m2_t test_vssubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -611,7 +611,7 @@ vuint16m4_t test_vssubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -620,7 +620,7 @@ vuint16m4_t test_vssubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -629,7 +629,7 @@ vuint16m8_t test_vssubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -638,7 +638,7 @@ vuint16m8_t test_vssubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssubu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, @@ -648,7 +648,7 @@ vuint32mf2_t test_vssubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, // CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -657,7 +657,7 @@ vuint32mf2_t test_vssubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -666,7 +666,7 @@ vuint32m1_t test_vssubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -675,7 +675,7 @@ vuint32m1_t test_vssubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -684,7 +684,7 @@ vuint32m2_t test_vssubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -693,7 +693,7 @@ vuint32m2_t test_vssubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -702,7 +702,7 @@ vuint32m4_t test_vssubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: 
@test_vssubu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -711,7 +711,7 @@ vuint32m4_t test_vssubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -720,7 +720,7 @@ vuint32m8_t test_vssubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -729,7 +729,7 @@ vuint32m8_t test_vssubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -738,7 +738,7 @@ vuint64m1_t test_vssubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -747,7 +747,7 @@ vuint64m1_t test_vssubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -756,7 +756,7 @@ vuint64m2_t test_vssubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint64m2_t test_vssubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -765,7 +765,7 @@ vuint64m2_t test_vssubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -774,7 +774,7 @@ vuint64m4_t test_vssubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -783,7 +783,7 @@ vuint64m4_t test_vssubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -792,7 +792,7 @@ vuint64m8_t test_vssubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vssubu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsub.c index 4275d13..30929ff 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsub.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vsub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ vint8mf8_t test_vsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: 
@test_vsub_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ vint8mf4_t test_vsub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ vint8mf4_t test_vsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ vint8mf2_t test_vsub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vsub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vsub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { 
// CHECK-RV64-LABEL: @test_vsub_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vsub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint8m8_t test_vsub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint8m8_t test_vsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint16mf4_t test_vsub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4_t 
test_vsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint16mf2_t test_vsub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint16mf2_t test_vsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint16m1_t test_vsub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint16m1_t test_vsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint16m2_t test_vsub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint16m2_t test_vsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint16m4_t test_vsub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint16m4_t test_vsub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint16m4_t test_vsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint16m8_t test_vsub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint16m8_t test_vsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vsub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vsub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ vint32m2_t test_vsub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ vint32m2_t test_vsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ vint32m4_t test_vsub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ vint32m4_t test_vsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ vint32m8_t test_vsub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ vint32m8_t test_vsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsub.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ vint64m1_t test_vsub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ vint64m1_t test_vsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ vint64m2_t test_vsub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ vint64m2_t test_vsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ vint64m4_t test_vsub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ vint64m4_t test_vsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ vint64m8_t test_vsub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // 
CHECK-RV64-LABEL: @test_vsub_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ vint64m8_t test_vsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsub_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ vuint8mf8_t test_vsub_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ vuint8mf8_t test_vsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsub_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ vuint8mf4_t test_vsub_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ vuint8mf4_t test_vsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsub_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ vuint8mf2_t test_vsub_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ 
-456,7 +456,7 @@ vuint8mf2_t test_vsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsub_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ vuint8m1_t test_vsub_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ vuint8m1_t test_vsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsub_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ vuint8m2_t test_vsub_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ vuint8m2_t test_vsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsub_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ vuint8m4_t test_vsub_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ vuint8m4_t test_vsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t 
test_vsub_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ vuint8m8_t test_vsub_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ vuint8m8_t test_vsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsub_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -537,7 +537,7 @@ vuint16mf4_t test_vsub_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vsub_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -546,7 +546,7 @@ vuint16mf4_t test_vsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsub_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -555,7 +555,7 @@ vuint16mf2_t test_vsub_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vsub_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -564,7 +564,7 @@ vuint16mf2_t test_vsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsub_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -573,7 +573,7 @@ vuint16m1_t test_vsub_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -582,7 +582,7 @@ vuint16m1_t test_vsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsub_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -591,7 +591,7 @@ vuint16m2_t test_vsub_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -600,7 +600,7 @@ vuint16m2_t test_vsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsub_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -609,7 +609,7 @@ vuint16m4_t test_vsub_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -618,7 +618,7 @@ vuint16m4_t test_vsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsub_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -627,7 +627,7 @@ vuint16m8_t test_vsub_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -636,7 +636,7 @@ vuint16m8_t test_vsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_u32mf2( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsub_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -645,7 +645,7 @@ vuint32mf2_t test_vsub_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vsub_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -654,7 +654,7 @@ vuint32mf2_t test_vsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsub_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -663,7 +663,7 @@ vuint32m1_t test_vsub_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -672,7 +672,7 @@ vuint32m1_t test_vsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsub_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -681,7 +681,7 @@ vuint32m2_t test_vsub_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2_t test_vsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsub_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -699,7 
+699,7 @@ vuint32m4_t test_vsub_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -708,7 +708,7 @@ vuint32m4_t test_vsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsub_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -717,7 +717,7 @@ vuint32m8_t test_vsub_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -726,7 +726,7 @@ vuint32m8_t test_vsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsub_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -735,7 +735,7 @@ vuint64m1_t test_vsub_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -744,7 +744,7 @@ vuint64m1_t test_vsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsub_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -753,7 +753,7 @@ vuint64m2_t test_vsub_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -762,7 +762,7 @@ vuint64m2_t test_vsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsub_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -771,7 +771,7 @@ vuint64m4_t test_vsub_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -780,7 +780,7 @@ vuint64m4_t test_vsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsub_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -789,7 +789,7 @@ vuint64m8_t test_vsub_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vsub_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwadd.c index 77036c4d..93f3581 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwadd.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ vint16mf4_t test_vwadd_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ vint16mf4_t 
test_vwadd_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i16.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) { @@ -33,7 +33,7 @@ vint16mf4_t test_vwadd_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ vint16mf4_t test_vwadd_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -51,7 +51,7 @@ vint16mf2_t test_vwadd_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ vint16mf2_t test_vwadd_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i16.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) { @@ -69,7 +69,7 @@ vint16mf2_t test_vwadd_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ vint16mf2_t test_vwadd_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8.i64( undef, 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -87,7 +87,7 @@ vint16m1_t test_vwadd_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint16m1_t test_vwadd_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i16.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) { @@ -105,7 +105,7 @@ vint16m1_t test_vwadd_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint16m1_t test_vwadd_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint16m2_t test_vwadd_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint16m2_t test_vwadd_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i16.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint16m2_t test_vwadd_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwadd.w.nxv8i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint16m2_t test_vwadd_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwadd_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint16m4_t test_vwadd_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwadd_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint16m4_t test_vwadd_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i16.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwadd_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint16m4_t test_vwadd_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwadd_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint16m4_t test_vwadd_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwadd_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint16m8_t test_vwadd_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwadd_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) { @@ -204,7 +204,7 @@ 
vint16m8_t test_vwadd_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv32i16.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwadd_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint16m8_t test_vwadd_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv32i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv32i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwadd_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint16m8_t test_vwadd_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwadd_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint32mf2_t test_vwadd_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwadd_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint32mf2_t test_vwadd_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i32.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwadd_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vwadd_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwadd_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vwadd_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwadd_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vwadd_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwadd_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vwadd_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i32.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwadd_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) { @@ -285,7 +285,7 @@ vint32m1_t test_vwadd_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwadd_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) { @@ -294,7 +294,7 @@ vint32m1_t test_vwadd_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwadd_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -303,7 +303,7 @@ vint32m2_t test_vwadd_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwadd_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) { @@ -312,7 +312,7 @@ vint32m2_t test_vwadd_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i32.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwadd_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) { @@ -321,7 +321,7 @@ vint32m2_t test_vwadd_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: 
@test_vwadd_wx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwadd_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) { @@ -330,7 +330,7 @@ vint32m2_t test_vwadd_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -339,7 +339,7 @@ vint32m4_t test_vwadd_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) { @@ -348,7 +348,7 @@ vint32m4_t test_vwadd_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i32.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ vint32m4_t test_vwadd_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) { @@ -366,7 +366,7 @@ vint32m4_t test_vwadd_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ vint32m8_t test_vwadd_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) { @@ -384,7 +384,7 @@ vint32m8_t test_vwadd_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i32.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) { @@ -393,7 +393,7 @@ vint32m8_t test_vwadd_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) { @@ -402,7 +402,7 @@ vint32m8_t test_vwadd_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -411,7 +411,7 @@ vint64m1_t test_vwadd_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -420,7 +420,7 @@ vint64m1_t test_vwadd_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) { @@ -429,7 +429,7 @@ vint64m1_t test_vwadd_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) { @@ -438,7 +438,7 @@ vint64m1_t test_vwadd_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -447,7 +447,7 @@ vint64m2_t test_vwadd_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) { @@ -456,7 +456,7 @@ vint64m2_t test_vwadd_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i64.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ vint64m2_t test_vwadd_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) { @@ -474,7 +474,7 @@ vint64m2_t test_vwadd_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ vint64m4_t test_vwadd_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) { @@ -492,7 +492,7 @@ vint64m4_t test_vwadd_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i64.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) { 
@@ -501,7 +501,7 @@ vint64m4_t test_vwadd_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) { @@ -510,7 +510,7 @@ vint64m4_t test_vwadd_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -519,7 +519,7 @@ vint64m8_t test_vwadd_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) { @@ -528,7 +528,7 @@ vint64m8_t test_vwadd_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i64.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) { @@ -537,7 +537,7 @@ vint64m8_t test_vwadd_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwadd_wx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) { @@ -546,7 +546,7 @@ vint64m8_t test_vwadd_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, @@ -556,7 +556,7 @@ vuint16mf4_t test_vwaddu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -565,7 +565,7 @@ vuint16mf4_t test_vwaddu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2, @@ -575,7 +575,7 @@ vuint16mf4_t test_vwaddu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2, // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) { @@ -584,7 +584,7 @@ vuint16mf4_t test_vwaddu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, @@ -594,7 +594,7 @@ vuint16mf2_t test_vwaddu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -603,7 +603,7 @@ vuint16mf2_t test_vwaddu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2, @@ -613,7 +613,7 @@ vuint16mf2_t test_vwaddu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2, // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) { @@ -622,7 +622,7 @@ vuint16mf2_t test_vwaddu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m1( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -631,7 +631,7 @@ vuint16m1_t test_vwaddu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -640,7 +640,7 @@ vuint16m1_t test_vwaddu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { @@ -649,7 +649,7 @@ vuint16m1_t test_vwaddu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) { @@ -658,7 +658,7 @@ vuint16m1_t test_vwaddu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -667,7 +667,7 @@ vuint16m2_t test_vwaddu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -676,7 +676,7 @@ vuint16m2_t test_vwaddu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret [[TMP0]] // vuint16m2_t test_vwaddu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) { @@ -685,7 +685,7 @@ vuint16m2_t test_vwaddu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) { @@ -694,7 +694,7 @@ vuint16m2_t test_vwaddu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -703,7 +703,7 @@ vuint16m4_t test_vwaddu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -712,7 +712,7 @@ vuint16m4_t test_vwaddu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) { @@ -721,7 +721,7 @@ vuint16m4_t test_vwaddu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) { @@ -730,7 +730,7 @@ vuint16m4_t test_vwaddu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -739,7 +739,7 @@ vuint16m8_t test_vwaddu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -748,7 +748,7 @@ vuint16m8_t test_vwaddu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) { @@ -757,7 +757,7 @@ vuint16m8_t test_vwaddu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv32i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv32i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) { @@ -766,7 +766,7 @@ vuint16m8_t test_vwaddu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, @@ -776,7 +776,7 @@ vuint32mf2_t test_vwaddu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vwaddu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -785,7 +785,7 @@ vuint32mf2_t test_vwaddu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, @@ -795,7 +795,7 @@ vuint32mf2_t test_vwaddu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vwaddu_wx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t 
test_vwaddu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) { @@ -804,7 +804,7 @@ vuint32mf2_t test_vwaddu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, @@ -814,7 +814,7 @@ vuint32m1_t test_vwaddu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -823,7 +823,7 @@ vuint32m1_t test_vwaddu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { @@ -832,7 +832,7 @@ vuint32m1_t test_vwaddu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) { @@ -841,7 +841,7 @@ vuint32m1_t test_vwaddu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -850,7 +850,7 @@ vuint32m2_t test_vwaddu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -859,7 +859,7 @@ vuint32m2_t test_vwaddu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwaddu.w.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) { @@ -868,7 +868,7 @@ vuint32m2_t test_vwaddu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) { @@ -877,7 +877,7 @@ vuint32m2_t test_vwaddu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -886,7 +886,7 @@ vuint32m4_t test_vwaddu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -895,7 +895,7 @@ vuint32m4_t test_vwaddu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) { @@ -904,7 +904,7 @@ vuint32m4_t test_vwaddu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) { @@ -913,7 +913,7 @@ vuint32m4_t test_vwaddu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t 
test_vwaddu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -922,7 +922,7 @@ vuint32m8_t test_vwaddu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -931,7 +931,7 @@ vuint32m8_t test_vwaddu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) { @@ -940,7 +940,7 @@ vuint32m8_t test_vwaddu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) { @@ -949,7 +949,7 @@ vuint32m8_t test_vwaddu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, @@ -959,7 +959,7 @@ vuint64m1_t test_vwaddu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -968,7 +968,7 @@ vuint64m1_t test_vwaddu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { @@ -977,7 +977,7 @@ vuint64m1_t test_vwaddu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwaddu.w.nxv1i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) { @@ -986,7 +986,7 @@ vuint64m1_t test_vwaddu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -995,7 +995,7 @@ vuint64m2_t test_vwaddu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -1004,7 +1004,7 @@ vuint64m2_t test_vwaddu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) { @@ -1013,7 +1013,7 @@ vuint64m2_t test_vwaddu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) { @@ -1022,7 +1022,7 @@ vuint64m2_t test_vwaddu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwaddu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1031,7 +1031,7 @@ vuint64m4_t test_vwaddu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint64m4_t test_vwaddu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -1040,7 +1040,7 @@ vuint64m4_t test_vwaddu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwaddu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) { @@ -1049,7 +1049,7 @@ vuint64m4_t test_vwaddu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwaddu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) { @@ -1058,7 +1058,7 @@ vuint64m4_t test_vwaddu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1067,7 +1067,7 @@ vuint64m8_t test_vwaddu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -1076,7 +1076,7 @@ vuint64m8_t test_vwaddu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) { @@ -1085,7 +1085,7 @@ vuint64m8_t test_vwaddu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_wx_u64m8(vuint64m8_t op1, uint32_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwcvt.c index 4c4c6a2..03d06da 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwcvt.c 
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwcvt.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwcvt_x_x_v_i16mf4 (vint8mf8_t src, size_t vl) { @@ -15,7 +15,7 @@ vint16mf4_t test_vwcvt_x_x_v_i16mf4 (vint8mf8_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwcvt_x_x_v_i16mf2 (vint8mf4_t src, size_t vl) { @@ -24,7 +24,7 @@ vint16mf2_t test_vwcvt_x_x_v_i16mf2 (vint8mf4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwcvt_x_x_v_i16m1 (vint8mf2_t src, size_t vl) { @@ -33,7 +33,7 @@ vint16m1_t test_vwcvt_x_x_v_i16m1 (vint8mf2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwcvt_x_x_v_i16m2 (vint8m1_t src, size_t vl) { @@ -42,7 +42,7 @@ vint16m2_t test_vwcvt_x_x_v_i16m2 (vint8m1_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwcvt_x_x_v_i16m4 (vint8m2_t src, size_t vl) { @@ -51,7 +51,7 @@ vint16m4_t test_vwcvt_x_x_v_i16m4 (vint8m2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwcvt_x_x_v_i16m8 (vint8m4_t src, size_t vl) { @@ -60,7 +60,7 @@ vint16m8_t test_vwcvt_x_x_v_i16m8 (vint8m4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwcvtu_x_x_v_u16mf4 (vuint8mf8_t src, size_t vl) { @@ -69,7 +69,7 @@ vuint16mf4_t test_vwcvtu_x_x_v_u16mf4 (vuint8mf8_t src, 
size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwcvtu_x_x_v_u16mf2 (vuint8mf4_t src, size_t vl) { @@ -78,7 +78,7 @@ vuint16mf2_t test_vwcvtu_x_x_v_u16mf2 (vuint8mf4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwcvtu_x_x_v_u16m1 (vuint8mf2_t src, size_t vl) { @@ -87,7 +87,7 @@ vuint16m1_t test_vwcvtu_x_x_v_u16m1 (vuint8mf2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwcvtu_x_x_v_u16m2 (vuint8m1_t src, size_t vl) { @@ -96,7 +96,7 @@ vuint16m2_t test_vwcvtu_x_x_v_u16m2 (vuint8m1_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwcvtu_x_x_v_u16m4 (vuint8m2_t src, size_t vl) { @@ -105,7 +105,7 @@ vuint16m4_t test_vwcvtu_x_x_v_u16m4 (vuint8m2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwcvtu_x_x_v_u16m8 (vuint8m4_t src, size_t vl) { @@ -114,7 +114,7 @@ vuint16m8_t test_vwcvtu_x_x_v_u16m8 (vuint8m4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwcvt_x_x_v_i32mf2 (vint16mf4_t src, size_t vl) { @@ -123,7 +123,7 @@ vint32mf2_t test_vwcvt_x_x_v_i32mf2 (vint16mf4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwcvt_x_x_v_i32m1 (vint16mf2_t src, size_t vl) { @@ -132,7 +132,7 @@ vint32m1_t test_vwcvt_x_x_v_i32m1 (vint16mf2_t src, 
size_t vl) { // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwcvt_x_x_v_i32m2 (vint16m1_t src, size_t vl) { @@ -141,7 +141,7 @@ vint32m2_t test_vwcvt_x_x_v_i32m2 (vint16m1_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwcvt_x_x_v_i32m4 (vint16m2_t src, size_t vl) { @@ -150,7 +150,7 @@ vint32m4_t test_vwcvt_x_x_v_i32m4 (vint16m2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwcvt_x_x_v_i32m8 (vint16m4_t src, size_t vl) { @@ -159,7 +159,7 @@ vint32m8_t test_vwcvt_x_x_v_i32m8 (vint16m4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwcvtu_x_x_v_u32mf2 (vuint16mf4_t src, size_t vl) { @@ -168,7 +168,7 @@ vuint32mf2_t test_vwcvtu_x_x_v_u32mf2 (vuint16mf4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwcvtu_x_x_v_u32m1 (vuint16mf2_t src, size_t vl) { @@ -177,7 +177,7 @@ vuint32m1_t test_vwcvtu_x_x_v_u32m1 (vuint16mf2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwcvtu_x_x_v_u32m2 (vuint16m1_t src, size_t vl) { @@ -186,7 +186,7 @@ vuint32m2_t test_vwcvtu_x_x_v_u32m2 (vuint16m1_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwcvtu_x_x_v_u32m4 (vuint16m2_t src, size_t vl) { @@ -195,7 +195,7 @@ vuint32m4_t 
test_vwcvtu_x_x_v_u32m4 (vuint16m2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwcvtu_x_x_v_u32m8 (vuint16m4_t src, size_t vl) { @@ -204,7 +204,7 @@ vuint32m8_t test_vwcvtu_x_x_v_u32m8 (vuint16m4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64( [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwcvt_x_x_v_i64m1 (vint32mf2_t src, size_t vl) { @@ -213,7 +213,7 @@ vint64m1_t test_vwcvt_x_x_v_i64m1 (vint32mf2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64( [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwcvt_x_x_v_i64m2 (vint32m1_t src, size_t vl) { @@ -222,7 +222,7 @@ vint64m2_t test_vwcvt_x_x_v_i64m2 (vint32m1_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64( [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwcvt_x_x_v_i64m4 (vint32m2_t src, size_t vl) { @@ -231,7 +231,7 @@ vint64m4_t test_vwcvt_x_x_v_i64m4 (vint32m2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64( [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwcvt_x_x_v_i64m8 (vint32m4_t src, size_t vl) { @@ -240,7 +240,7 @@ vint64m8_t test_vwcvt_x_x_v_i64m8 (vint32m4_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64( [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwcvtu_x_x_v_u64m1 (vuint32mf2_t src, size_t vl) { @@ -249,7 +249,7 @@ vuint64m1_t test_vwcvtu_x_x_v_u64m1 (vuint32mf2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64( [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwcvtu_x_x_v_u64m2 (vuint32m1_t src, size_t vl) { @@ -258,7 +258,7 @@ 
vuint64m2_t test_vwcvtu_x_x_v_u64m2 (vuint32m1_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64( [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwcvtu_x_x_v_u64m4 (vuint32m2_t src, size_t vl) { @@ -267,7 +267,7 @@ vuint64m4_t test_vwcvtu_x_x_v_u64m4 (vuint32m2_t src, size_t vl) { // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64( [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwcvtu_x_x_v_u64m8 (vuint32m4_t src, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmul.c index 023133b..3a86c65 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmul.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmul_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ vint16mf4_t test_vwmul_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmul_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ vint16mf4_t test_vwmul_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmul_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ vint16mf2_t test_vwmul_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmul_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ vint16mf2_t test_vwmul_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmul_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ vint16m1_t test_vwmul_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmul_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ vint16m1_t test_vwmul_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmul_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ vint16m2_t test_vwmul_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmul_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ vint16m2_t test_vwmul_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmul_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ vint16m4_t test_vwmul_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmul_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint16m4_t test_vwmul_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmul_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 
+105,7 @@ vint16m8_t test_vwmul_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmul_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint16m8_t test_vwmul_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmul_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint32mf2_t test_vwmul_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmul_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint32mf2_t test_vwmul_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmul_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint32m1_t test_vwmul_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmul_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint32m1_t test_vwmul_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmul_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint32m2_t test_vwmul_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmul_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint32m2_t test_vwmul_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmul_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint32m4_t test_vwmul_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmul_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint32m4_t test_vwmul_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmul_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint32m8_t test_vwmul_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmul_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint32m8_t test_vwmul_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmul_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint64m1_t test_vwmul_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmul_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint64m1_t 
test_vwmul_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmul_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint64m2_t test_vwmul_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmul_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint64m2_t test_vwmul_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmul_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint64m4_t test_vwmul_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmul_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint64m4_t test_vwmul_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmul_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint64m8_t test_vwmul_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmul_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmul_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint64m8_t test_vwmul_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmulu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -285,7 +285,7 @@ vuint16mf4_t test_vwmulu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmulu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -294,7 +294,7 @@ vuint16mf4_t test_vwmulu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmulu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -303,7 +303,7 @@ vuint16mf2_t test_vwmulu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmulu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -312,7 +312,7 @@ vuint16mf2_t test_vwmulu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmulu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -321,7 +321,7 @@ vuint16m1_t test_vwmulu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmulu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -330,7 +330,7 @@ vuint16m1_t test_vwmulu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmulu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ vuint16m2_t 
test_vwmulu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmulu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -348,7 +348,7 @@ vuint16m2_t test_vwmulu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmulu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ vuint16m4_t test_vwmulu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmulu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -366,7 +366,7 @@ vuint16m4_t test_vwmulu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmulu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ vuint16m8_t test_vwmulu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmulu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -384,7 +384,7 @@ vuint16m8_t test_vwmulu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmulu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -393,7 +393,7 @@ vuint32mf2_t test_vwmulu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl // CHECK-RV64-LABEL: @test_vwmulu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmulu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -402,7 +402,7 @@ vuint32mf2_t test_vwmulu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmulu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -411,7 +411,7 @@ vuint32m1_t test_vwmulu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmulu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -420,7 +420,7 @@ vuint32m1_t test_vwmulu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmulu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -429,7 +429,7 @@ vuint32m2_t test_vwmulu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmulu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -438,7 +438,7 @@ vuint32m2_t test_vwmulu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmulu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -447,7 +447,7 @@ vuint32m4_t test_vwmulu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t 
test_vwmulu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -456,7 +456,7 @@ vuint32m4_t test_vwmulu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmulu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -465,7 +465,7 @@ vuint32m8_t test_vwmulu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmulu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -474,7 +474,7 @@ vuint32m8_t test_vwmulu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmulu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -483,7 +483,7 @@ vuint64m1_t test_vwmulu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmulu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -492,7 +492,7 @@ vuint64m1_t test_vwmulu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmulu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -501,7 +501,7 @@ vuint64m2_t test_vwmulu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmulu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -510,7 +510,7 @@ vuint64m2_t test_vwmulu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m4( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmulu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -519,7 +519,7 @@ vuint64m4_t test_vwmulu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmulu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -528,7 +528,7 @@ vuint64m4_t test_vwmulu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmulu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -537,7 +537,7 @@ vuint64m8_t test_vwmulu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmulu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -546,7 +546,7 @@ vuint64m8_t test_vwmulu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmulsu_vv_i16mf4(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -555,7 +555,7 @@ vint16mf4_t test_vwmulsu_vv_i16mf4(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmulsu_vx_i16mf4(vint8mf8_t op1, uint8_t op2, size_t vl) { @@ -564,7 +564,7 @@ vint16mf4_t test_vwmulsu_vx_i16mf4(vint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmulsu_vv_i16mf2(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -573,7 +573,7 @@ vint16mf2_t test_vwmulsu_vv_i16mf2(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmulsu_vx_i16mf2(vint8mf4_t op1, uint8_t op2, size_t vl) { @@ -582,7 +582,7 @@ vint16mf2_t test_vwmulsu_vx_i16mf2(vint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmulsu_vv_i16m1(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -591,7 +591,7 @@ vint16m1_t test_vwmulsu_vv_i16m1(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmulsu_vx_i16m1(vint8mf2_t op1, uint8_t op2, size_t vl) { @@ -600,7 +600,7 @@ vint16m1_t test_vwmulsu_vx_i16m1(vint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmulsu_vv_i16m2(vint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -609,7 +609,7 @@ vint16m2_t test_vwmulsu_vv_i16m2(vint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmulsu_vx_i16m2(vint8m1_t op1, uint8_t op2, size_t vl) { @@ -618,7 +618,7 @@ vint16m2_t test_vwmulsu_vx_i16m2(vint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmulsu_vv_i16m4(vint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -627,7 +627,7 @@ vint16m4_t 
test_vwmulsu_vv_i16m4(vint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmulsu_vx_i16m4(vint8m2_t op1, uint8_t op2, size_t vl) { @@ -636,7 +636,7 @@ vint16m4_t test_vwmulsu_vx_i16m4(vint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmulsu_vv_i16m8(vint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -645,7 +645,7 @@ vint16m8_t test_vwmulsu_vv_i16m8(vint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmulsu_vx_i16m8(vint8m4_t op1, uint8_t op2, size_t vl) { @@ -654,7 +654,7 @@ vint16m8_t test_vwmulsu_vx_i16m8(vint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmulsu_vv_i32mf2(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -663,7 +663,7 @@ vint32mf2_t test_vwmulsu_vv_i32mf2(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmulsu_vx_i32mf2(vint16mf4_t op1, uint16_t op2, size_t vl) { @@ -672,7 +672,7 @@ vint32mf2_t test_vwmulsu_vx_i32mf2(vint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmulsu_vv_i32m1(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -681,7 +681,7 @@ vint32m1_t test_vwmulsu_vv_i32m1(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16.i64( 
[[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmulsu_vx_i32m1(vint16mf2_t op1, uint16_t op2, size_t vl) { @@ -690,7 +690,7 @@ vint32m1_t test_vwmulsu_vx_i32m1(vint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmulsu_vv_i32m2(vint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -699,7 +699,7 @@ vint32m2_t test_vwmulsu_vv_i32m2(vint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmulsu_vx_i32m2(vint16m1_t op1, uint16_t op2, size_t vl) { @@ -708,7 +708,7 @@ vint32m2_t test_vwmulsu_vx_i32m2(vint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmulsu_vv_i32m4(vint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -717,7 +717,7 @@ vint32m4_t test_vwmulsu_vv_i32m4(vint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmulsu_vx_i32m4(vint16m2_t op1, uint16_t op2, size_t vl) { @@ -726,7 +726,7 @@ vint32m4_t test_vwmulsu_vx_i32m4(vint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmulsu_vv_i32m8(vint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -735,7 +735,7 @@ vint32m8_t test_vwmulsu_vv_i32m8(vint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] 
// vint32m8_t test_vwmulsu_vx_i32m8(vint16m4_t op1, uint16_t op2, size_t vl) { @@ -744,7 +744,7 @@ vint32m8_t test_vwmulsu_vx_i32m8(vint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmulsu_vv_i64m1(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -753,7 +753,7 @@ vint64m1_t test_vwmulsu_vv_i64m1(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmulsu_vx_i64m1(vint32mf2_t op1, uint32_t op2, size_t vl) { @@ -762,7 +762,7 @@ vint64m1_t test_vwmulsu_vx_i64m1(vint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmulsu_vv_i64m2(vint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -771,7 +771,7 @@ vint64m2_t test_vwmulsu_vv_i64m2(vint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmulsu_vx_i64m2(vint32m1_t op1, uint32_t op2, size_t vl) { @@ -780,7 +780,7 @@ vint64m2_t test_vwmulsu_vx_i64m2(vint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmulsu_vv_i64m4(vint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -789,7 +789,7 @@ vint64m4_t test_vwmulsu_vv_i64m4(vint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmulsu_vx_i64m4(vint32m2_t op1, uint32_t op2, size_t vl) { @@ -798,7 +798,7 @@ vint64m4_t test_vwmulsu_vx_i64m4(vint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m8( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmulsu_vv_i64m8(vint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -807,7 +807,7 @@ vint64m8_t test_vwmulsu_vv_i64m8(vint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmulsu_vx_i64m8(vint32m4_t op1, uint32_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwsub.c index 9422cd4..f2b4098 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwsub.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwsub_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ vint16mf4_t test_vwsub_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwsub_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ vint16mf4_t test_vwsub_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i16.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwsub_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) { @@ -33,7 +33,7 @@ vint16mf4_t test_vwsub_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwsub_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ vint16mf4_t test_vwsub_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwsub_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -51,7 +51,7 @@ vint16mf2_t test_vwsub_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwsub_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ vint16mf2_t test_vwsub_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i16.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwsub_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) { @@ -69,7 +69,7 @@ vint16mf2_t test_vwsub_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwsub_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ vint16mf2_t test_vwsub_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -87,7 +87,7 @@ vint16m1_t test_vwsub_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint16m1_t test_vwsub_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i16.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) { @@ -105,7 +105,7 @@ vint16m1_t test_vwsub_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) 
{ // CHECK-RV64-LABEL: @test_vwsub_wx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint16m1_t test_vwsub_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint16m2_t test_vwsub_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint16m2_t test_vwsub_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i16.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint16m2_t test_vwsub_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint16m2_t test_vwsub_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint16m4_t test_vwsub_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint16m4_t test_vwsub_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint16m4_t test_vwsub_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i16.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint16m4_t test_vwsub_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint16m4_t test_vwsub_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint16m8_t test_vwsub_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint16m8_t test_vwsub_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv32i16.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint16m8_t test_vwsub_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv32i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv32i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint16m8_t test_vwsub_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint32mf2_t test_vwsub_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint32mf2_t test_vwsub_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i32.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vwsub_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vwsub_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vwsub_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vwsub_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i32.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) { @@ -285,7 +285,7 @@ vint32m1_t 
test_vwsub_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) { @@ -294,7 +294,7 @@ vint32m1_t test_vwsub_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -303,7 +303,7 @@ vint32m2_t test_vwsub_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) { @@ -312,7 +312,7 @@ vint32m2_t test_vwsub_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i32.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) { @@ -321,7 +321,7 @@ vint32m2_t test_vwsub_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) { @@ -330,7 +330,7 @@ vint32m2_t test_vwsub_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -339,7 +339,7 @@ vint32m4_t test_vwsub_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsub.nxv8i32.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) { @@ -348,7 +348,7 @@ vint32m4_t test_vwsub_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i32.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ vint32m4_t test_vwsub_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) { @@ -366,7 +366,7 @@ vint32m4_t test_vwsub_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ vint32m8_t test_vwsub_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) { @@ -384,7 +384,7 @@ vint32m8_t test_vwsub_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i32.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) { @@ -393,7 +393,7 @@ vint32m8_t test_vwsub_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) { @@ -402,7 +402,7 @@ vint32m8_t test_vwsub_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: 
@test_vwsub_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -411,7 +411,7 @@ vint64m1_t test_vwsub_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -420,7 +420,7 @@ vint64m1_t test_vwsub_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) { @@ -429,7 +429,7 @@ vint64m1_t test_vwsub_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) { @@ -438,7 +438,7 @@ vint64m1_t test_vwsub_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -447,7 +447,7 @@ vint64m2_t test_vwsub_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) { @@ -456,7 +456,7 @@ vint64m2_t test_vwsub_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i64.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ vint64m2_t test_vwsub_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) { @@ -474,7 +474,7 @@ vint64m2_t test_vwsub_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ vint64m4_t test_vwsub_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) { @@ -492,7 +492,7 @@ vint64m4_t test_vwsub_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i64.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) { @@ -501,7 +501,7 @@ vint64m4_t test_vwsub_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) { @@ -510,7 +510,7 @@ vint64m4_t test_vwsub_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -519,7 +519,7 @@ vint64m8_t test_vwsub_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsub.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) { @@ -528,7 +528,7 @@ vint64m8_t test_vwsub_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i64.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) { @@ -537,7 +537,7 @@ vint64m8_t test_vwsub_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsub_wx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) { @@ -546,7 +546,7 @@ vint64m8_t test_vwsub_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, @@ -556,7 +556,7 @@ vuint16mf4_t test_vwsubu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, // CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -565,7 +565,7 @@ vuint16mf4_t test_vwsubu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2, @@ -575,7 +575,7 @@ vuint16mf4_t test_vwsubu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2, // CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) { @@ -584,7 +584,7 @@ 
vuint16mf4_t test_vwsubu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, @@ -594,7 +594,7 @@ vuint16mf2_t test_vwsubu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, // CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -603,7 +603,7 @@ vuint16mf2_t test_vwsubu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2, @@ -613,7 +613,7 @@ vuint16mf2_t test_vwsubu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2, // CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) { @@ -622,7 +622,7 @@ vuint16mf2_t test_vwsubu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -631,7 +631,7 @@ vuint16m1_t test_vwsubu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -640,7 +640,7 @@ vuint16m1_t test_vwsubu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsubu.w.nxv4i16.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { @@ -649,7 +649,7 @@ vuint16m1_t test_vwsubu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) { @@ -658,7 +658,7 @@ vuint16m1_t test_vwsubu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -667,7 +667,7 @@ vuint16m2_t test_vwsubu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -676,7 +676,7 @@ vuint16m2_t test_vwsubu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) { @@ -685,7 +685,7 @@ vuint16m2_t test_vwsubu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) { @@ -694,7 +694,7 @@ vuint16m2_t test_vwsubu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -703,7 +703,7 @@ vuint16m4_t test_vwsubu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // 
CHECK-RV64-LABEL: @test_vwsubu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -712,7 +712,7 @@ vuint16m4_t test_vwsubu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) { @@ -721,7 +721,7 @@ vuint16m4_t test_vwsubu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) { @@ -730,7 +730,7 @@ vuint16m4_t test_vwsubu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -739,7 +739,7 @@ vuint16m8_t test_vwsubu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -748,7 +748,7 @@ vuint16m8_t test_vwsubu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) { @@ -757,7 +757,7 @@ vuint16m8_t test_vwsubu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv32i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv32i16.i8.i64( undef, [[OP1:%.*]], 
i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) { @@ -766,7 +766,7 @@ vuint16m8_t test_vwsubu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, @@ -776,7 +776,7 @@ vuint32mf2_t test_vwsubu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vwsubu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -785,7 +785,7 @@ vuint32mf2_t test_vwsubu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, @@ -795,7 +795,7 @@ vuint32mf2_t test_vwsubu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, // CHECK-RV64-LABEL: @test_vwsubu_wx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) { @@ -804,7 +804,7 @@ vuint32mf2_t test_vwsubu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, @@ -814,7 +814,7 @@ vuint32m1_t test_vwsubu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -823,7 +823,7 @@ vuint32m1_t test_vwsubu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m1( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { @@ -832,7 +832,7 @@ vuint32m1_t test_vwsubu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) { @@ -841,7 +841,7 @@ vuint32m1_t test_vwsubu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -850,7 +850,7 @@ vuint32m2_t test_vwsubu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -859,7 +859,7 @@ vuint32m2_t test_vwsubu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) { @@ -868,7 +868,7 @@ vuint32m2_t test_vwsubu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) { @@ -877,7 +877,7 @@ vuint32m2_t test_vwsubu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -886,7 +886,7 @@ vuint32m4_t test_vwsubu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -895,7 +895,7 @@ vuint32m4_t test_vwsubu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) { @@ -904,7 +904,7 @@ vuint32m4_t test_vwsubu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) { @@ -913,7 +913,7 @@ vuint32m4_t test_vwsubu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -922,7 +922,7 @@ vuint32m8_t test_vwsubu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -931,7 +931,7 @@ vuint32m8_t test_vwsubu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) { @@ -940,7 +940,7 @@ vuint32m8_t test_vwsubu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: 
@test_vwsubu_wx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) { @@ -949,7 +949,7 @@ vuint32m8_t test_vwsubu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, @@ -959,7 +959,7 @@ vuint64m1_t test_vwsubu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -968,7 +968,7 @@ vuint64m1_t test_vwsubu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { @@ -977,7 +977,7 @@ vuint64m1_t test_vwsubu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) { @@ -986,7 +986,7 @@ vuint64m1_t test_vwsubu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -995,7 +995,7 @@ vuint64m2_t test_vwsubu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32.i64( undef, [[OP1:%.*]], 
i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -1004,7 +1004,7 @@ vuint64m2_t test_vwsubu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) { @@ -1013,7 +1013,7 @@ vuint64m2_t test_vwsubu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) { @@ -1022,7 +1022,7 @@ vuint64m2_t test_vwsubu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1031,7 +1031,7 @@ vuint64m4_t test_vwsubu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -1040,7 +1040,7 @@ vuint64m4_t test_vwsubu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) { @@ -1049,7 +1049,7 @@ vuint64m4_t test_vwsubu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) { @@ -1058,7 +1058,7 @@ vuint64m4_t test_vwsubu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: 
@test_vwsubu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1067,7 +1067,7 @@ vuint64m8_t test_vwsubu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -1076,7 +1076,7 @@ vuint64m8_t test_vwsubu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) { @@ -1085,7 +1085,7 @@ vuint64m8_t test_vwsubu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_wx_u64m8(vuint64m8_t op1, uint32_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vxor.c index be076ea..0e77510 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vxor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vxor.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vxor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ vint8mf8_t test_vxor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vxor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ vint8mf8_t test_vxor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vxor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ vint8mf4_t test_vxor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vxor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ vint8mf4_t test_vxor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vxor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ vint8mf2_t test_vxor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vxor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ vint8mf2_t test_vxor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vxor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ vint8m1_t test_vxor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vxor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ vint8m1_t test_vxor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vxor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ vint8m2_t test_vxor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vxor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ vint8m2_t test_vxor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vxor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ vint8m4_t test_vxor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vxor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ vint8m4_t test_vxor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vxor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ vint8m8_t test_vxor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vxor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ vint8m8_t test_vxor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vxor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ vint16mf4_t test_vxor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vxor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4_t test_vxor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vxor.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vxor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ vint16mf2_t test_vxor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vxor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ vint16mf2_t test_vxor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vxor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ vint16m1_t test_vxor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vxor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ vint16m1_t test_vxor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vxor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ vint16m2_t test_vxor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vxor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ vint16m2_t test_vxor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vxor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ vint16m4_t test_vxor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t 
vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vxor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ vint16m4_t test_vxor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vxor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ vint16m8_t test_vxor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vxor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ vint16m8_t test_vxor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vxor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ vint32mf2_t test_vxor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vxor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ vint32mf2_t test_vxor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vxor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ vint32m1_t test_vxor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vxor_vx_i32m1(vint32m1_t 
op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ vint32m1_t test_vxor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vxor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ vint32m2_t test_vxor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vxor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ vint32m2_t test_vxor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vxor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ vint32m4_t test_vxor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vxor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ vint32m4_t test_vxor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vxor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ vint32m8_t test_vxor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vxor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ vint32m8_t test_vxor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vxor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ vint64m1_t test_vxor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vxor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ vint64m1_t test_vxor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vxor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ vint64m2_t test_vxor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vxor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ vint64m2_t test_vxor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vxor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ vint64m4_t test_vxor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vxor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ vint64m4_t test_vxor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vxor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ vint64m8_t test_vxor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vxor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ vint64m8_t test_vxor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vxor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ vuint8mf8_t test_vxor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vxor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ vuint8mf8_t test_vxor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vxor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ vuint8mf4_t test_vxor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vxor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ vuint8mf4_t test_vxor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vxor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ vuint8mf2_t test_vxor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vxor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ vuint8mf2_t test_vxor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vxor.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vxor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ vuint8m1_t test_vxor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vxor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ vuint8m1_t test_vxor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vxor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ vuint8m2_t test_vxor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vxor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ vuint8m2_t test_vxor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vxor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ vuint8m4_t test_vxor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vxor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ vuint8m4_t test_vxor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vxor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ vuint8m8_t test_vxor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u8m8( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vxor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ vuint8m8_t test_vxor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vxor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -537,7 +537,7 @@ vuint16mf4_t test_vxor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vxor_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vxor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -546,7 +546,7 @@ vuint16mf4_t test_vxor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vxor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -555,7 +555,7 @@ vuint16mf2_t test_vxor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vxor_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vxor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -564,7 +564,7 @@ vuint16mf2_t test_vxor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vxor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -573,7 +573,7 @@ vuint16m1_t test_vxor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vxor_vx_u16m1(vuint16m1_t op1, uint16_t op2, 
size_t vl) { @@ -582,7 +582,7 @@ vuint16m1_t test_vxor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vxor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -591,7 +591,7 @@ vuint16m2_t test_vxor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vxor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -600,7 +600,7 @@ vuint16m2_t test_vxor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vxor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -609,7 +609,7 @@ vuint16m4_t test_vxor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vxor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -618,7 +618,7 @@ vuint16m4_t test_vxor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vxor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -627,7 +627,7 @@ vuint16m8_t test_vxor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vxor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -636,7 +636,7 @@ vuint16m8_t test_vxor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.nxv1i32.i64( 
undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vxor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -645,7 +645,7 @@ vuint32mf2_t test_vxor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) // CHECK-RV64-LABEL: @test_vxor_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vxor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -654,7 +654,7 @@ vuint32mf2_t test_vxor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vxor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -663,7 +663,7 @@ vuint32m1_t test_vxor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vxor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -672,7 +672,7 @@ vuint32m1_t test_vxor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vxor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -681,7 +681,7 @@ vuint32m2_t test_vxor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vxor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2_t test_vxor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vxor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -699,7 +699,7 @@ vuint32m4_t test_vxor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vxor.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vxor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -708,7 +708,7 @@ vuint32m4_t test_vxor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vxor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -717,7 +717,7 @@ vuint32m8_t test_vxor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vxor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -726,7 +726,7 @@ vuint32m8_t test_vxor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vxor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -735,7 +735,7 @@ vuint64m1_t test_vxor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vxor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -744,7 +744,7 @@ vuint64m1_t test_vxor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vxor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -753,7 +753,7 @@ vuint64m2_t test_vxor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vxor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -762,7 +762,7 @@ vuint64m2_t test_vxor_vx_u64m2(vuint64m2_t op1, 
uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vxor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -771,7 +771,7 @@ vuint64m4_t test_vxor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vxor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -780,7 +780,7 @@ vuint64m4_t test_vxor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vxor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -789,7 +789,7 @@ vuint64m8_t test_vxor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-LABEL: @test_vxor_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vxor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp index eb718ff..9888e6c 100644 --- a/clang/utils/TableGen/RISCVVEmitter.cpp +++ b/clang/utils/TableGen/RISCVVEmitter.cpp @@ -162,6 +162,7 @@ private: bool IsMask; bool HasVL; bool HasPolicy; + bool HasNoMaskPassThru; bool HasNoMaskedOverloaded; bool HasAutoDef; // There is automiatic definition in header std::string ManualCodegen; @@ -177,8 +178,8 @@ public: RVVIntrinsic(StringRef Name, StringRef Suffix, StringRef MangledName, StringRef MangledSuffix, StringRef IRName, bool IsMask, bool HasMaskedOffOperand, bool HasVL, bool HasPolicy, - bool HasNoMaskedOverloaded, bool HasAutoDef, - StringRef ManualCodegen, const RVVTypes &Types, + bool HasNoMaskPassThru, bool HasNoMaskedOverloaded, + bool HasAutoDef, StringRef ManualCodegen, const RVVTypes &Types, const std::vector &IntrinsicTypes, const std::vector &RequiredFeatures, unsigned NF); ~RVVIntrinsic() = default; @@ -188,6 +189,7 @@ public: StringRef getMangledName() const { return MangledName; } bool hasVL() const { return HasVL; } bool hasPolicy() const { return HasPolicy; } + bool hasNoMaskPassThru() const { return HasNoMaskPassThru; } bool hasNoMaskedOverloaded() const { return HasNoMaskedOverloaded; } bool hasManualCodegen() const { return !ManualCodegen.empty(); } bool hasAutoDef() const { return HasAutoDef; } @@ -770,12 +772,14 @@ RVVIntrinsic::RVVIntrinsic(StringRef NewName, StringRef Suffix, StringRef NewMangledName, StringRef MangledSuffix, 
StringRef IRName, bool IsMask, bool HasMaskedOffOperand, bool HasVL, bool HasPolicy, - bool HasNoMaskedOverloaded, bool HasAutoDef, - StringRef ManualCodegen, const RVVTypes &OutInTypes, + bool HasNoMaskPassThru, bool HasNoMaskedOverloaded, + bool HasAutoDef, StringRef ManualCodegen, + const RVVTypes &OutInTypes, const std::vector &NewIntrinsicTypes, const std::vector &RequiredFeatures, unsigned NF) : IRName(IRName), IsMask(IsMask), HasVL(HasVL), HasPolicy(HasPolicy), + HasNoMaskPassThru(HasNoMaskPassThru), HasNoMaskedOverloaded(HasNoMaskedOverloaded), HasAutoDef(HasAutoDef), ManualCodegen(ManualCodegen.str()), NF(NF) { @@ -823,7 +827,7 @@ RVVIntrinsic::RVVIntrinsic(StringRef NewName, StringRef Suffix, // IntrinsicTypes is nonmasked version index. Need to update it // if there is maskedoff operand (It is always in first operand). IntrinsicTypes = NewIntrinsicTypes; - if (IsMask && HasMaskedOffOperand) { + if ((IsMask && HasMaskedOffOperand) || (!IsMask && HasNoMaskPassThru)) { for (auto &I : IntrinsicTypes) { if (I >= 0) I += NF; @@ -860,6 +864,9 @@ void RVVIntrinsic::emitCodeGenSwitchBody(raw_ostream &OS) const { } else { OS << " std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());\n"; } + } else if (hasNoMaskPassThru()) { + OS << " Ops.push_back(llvm::UndefValue::get(ResultType));\n"; + OS << " std::rotate(Ops.rbegin(), Ops.rbegin() + 1, Ops.rend());\n"; } OS << " IntrinsicTypes = {"; @@ -1107,6 +1114,8 @@ void RVVEmitter::createCodeGen(raw_ostream &OS) { PrintFatalError("Builtin with same name has different HasPolicy"); else if (P.first->second->hasPolicy() != Def->hasPolicy()) PrintFatalError("Builtin with same name has different HasPolicy"); + else if (P.first->second->hasNoMaskPassThru() != Def->hasNoMaskPassThru()) + PrintFatalError("Builtin with same name has different HasNoMaskPassThru"); else if (P.first->second->getIntrinsicTypes() != Def->getIntrinsicTypes()) PrintFatalError("Builtin with same name has different IntrinsicTypes"); } @@ -1154,6 +1163,7 @@ void RVVEmitter::createRVVIntrinsics( bool HasMaskedOffOperand = R->getValueAsBit("HasMaskedOffOperand"); bool HasVL = R->getValueAsBit("HasVL"); bool HasPolicy = R->getValueAsBit("HasPolicy"); + bool HasNoMaskPassThru = R->getValueAsBit("HasNoMaskPassThru"); bool HasNoMaskedOverloaded = R->getValueAsBit("HasNoMaskedOverloaded"); std::vector Log2LMULList = R->getValueAsListOfInts("Log2LMUL"); StringRef ManualCodegen = R->getValueAsString("ManualCodegen"); @@ -1228,8 +1238,8 @@ void RVVEmitter::createRVVIntrinsics( Out.push_back(std::make_unique( Name, SuffixStr, MangledName, MangledSuffixStr, IRName, /*IsMask=*/false, /*HasMaskedOffOperand=*/false, HasVL, HasPolicy, - HasNoMaskedOverloaded, HasAutoDef, ManualCodegen, Types.getValue(), - IntrinsicTypes, RequiredFeatures, NF)); + HasNoMaskPassThru, HasNoMaskedOverloaded, HasAutoDef, ManualCodegen, + Types.getValue(), IntrinsicTypes, RequiredFeatures, NF)); if (HasMask) { // Create a mask intrinsic Optional MaskTypes = @@ -1237,8 +1247,9 @@ void RVVEmitter::createRVVIntrinsics( Out.push_back(std::make_unique( Name, SuffixStr, MangledName, MangledSuffixStr, IRNameMask, /*IsMask=*/true, HasMaskedOffOperand, HasVL, HasPolicy, - HasNoMaskedOverloaded, HasAutoDef, ManualCodegenMask, - MaskTypes.getValue(), IntrinsicTypes, RequiredFeatures, NF)); + HasNoMaskPassThru, HasNoMaskedOverloaded, HasAutoDef, + ManualCodegenMask, MaskTypes.getValue(), IntrinsicTypes, + RequiredFeatures, NF)); } } // end for Log2LMULList } // end for TypeRange diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td 
b/llvm/include/llvm/IR/IntrinsicsRISCV.td index 57b0f79..9ec8a02 100644 --- a/llvm/include/llvm/IR/IntrinsicsRISCV.td +++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td @@ -383,12 +383,13 @@ let TargetPrefix = "riscv" in { let VLOperand = 2; } // For destination vector type is the same as first and second source vector. - // Input: (vector_in, int_vector_in, vl) + // Input: (passthru, vector_in, int_vector_in, vl) class RISCVRGatherVVNoMask : Intrinsic<[llvm_anyvector_ty], - [LLVMMatchType<0>, LLVMVectorOfBitcastsToInt<0>, llvm_anyint_ty], + [LLVMMatchType<0>, LLVMMatchType<0>, + LLVMVectorOfBitcastsToInt<0>, llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { - let VLOperand = 2; + let VLOperand = 3; } // For destination vector type is the same as first and second source vector. // Input: (vector_in, vector_in, int_vector_in, vl, ta) @@ -400,13 +401,14 @@ let TargetPrefix = "riscv" in { [ImmArg>, IntrNoMem]>, RISCVVIntrinsic { let VLOperand = 4; } - // Input: (vector_in, int16_vector_in, vl) + // Input: (passthru, vector_in, int16_vector_in, vl) class RISCVRGatherEI16VVNoMask : Intrinsic<[llvm_anyvector_ty], - [LLVMMatchType<0>, LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>, + [LLVMMatchType<0>, LLVMMatchType<0>, + LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>, llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { - let VLOperand = 2; + let VLOperand = 3; } // For destination vector type is the same as first and second source vector. // Input: (vector_in, vector_in, int16_vector_in, vl, ta) @@ -421,12 +423,13 @@ let TargetPrefix = "riscv" in { } // For destination vector type is the same as first source vector, and the // second operand is XLen. - // Input: (vector_in, xlen_in, vl) + // Input: (passthru, vector_in, xlen_in, vl) class RISCVGatherVXNoMask : Intrinsic<[llvm_anyvector_ty], - [LLVMMatchType<0>, llvm_anyint_ty, LLVMMatchType<1>], + [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty, + LLVMMatchType<1>], [IntrNoMem]>, RISCVVIntrinsic { - let VLOperand = 2; + let VLOperand = 3; } // For destination vector type is the same as first source vector (with mask). // Second operand is XLen. @@ -440,13 +443,14 @@ let TargetPrefix = "riscv" in { let VLOperand = 4; } // For destination vector type is the same as first source vector. - // Input: (vector_in, vector_in/scalar_in, vl) + // Input: (passthru, vector_in, vector_in/scalar_in, vl) class RISCVBinaryAAXNoMask : Intrinsic<[llvm_anyvector_ty], - [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty], + [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, + llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { - let SplatOperand = 1; - let VLOperand = 2; + let SplatOperand = 2; + let VLOperand = 3; } // For destination vector type is the same as first source vector (with mask). // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta) @@ -461,12 +465,13 @@ let TargetPrefix = "riscv" in { } // For destination vector type is the same as first source vector. The // second source operand must match the destination type or be an XLen scalar. - // Input: (vector_in, vector_in/scalar_in, vl) + // Input: (passthru, vector_in, vector_in/scalar_in, vl) class RISCVBinaryAAShiftNoMask : Intrinsic<[llvm_anyvector_ty], - [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty], + [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, + llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { - let VLOperand = 2; + let VLOperand = 3; } // For destination vector type is the same as first source vector (with mask). 
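A note on the renumbering above: the RISCVVEmitter.cpp hunk earlier appends an UndefValue and rotates it to the front of Ops, and the intrinsic classes here then bump SplatOperand/VLOperand by one to account for the new leading passthru; it is that undef which shows up as the extra first argument in every updated CHECK line. A minimal standalone sketch of the reordering idiom, in plain C++ with placeholder operand names rather than LLVM types:

#include <algorithm>
#include <cassert>
#include <string>
#include <vector>

int main() {
  // Operand list of an unmasked binary intrinsic before this patch:
  // (vector_in, vector_in/scalar_in, vl)  ->  SplatOperand = 1, VLOperand = 2.
  std::vector<std::string> Ops = {"op1", "op2", "vl"};

  // Append the undef passthru, then rotate the reversed range by one, which
  // moves the newly appended element to the front without inserting there.
  Ops.push_back("undef_passthru");
  std::rotate(Ops.rbegin(), Ops.rbegin() + 1, Ops.rend());

  // New layout: (passthru, vector_in, vector_in/scalar_in, vl), i.e.
  // SplatOperand = 2 and VLOperand = 3, matching the .td changes above.
  assert(Ops[0] == "undef_passthru" && Ops[3] == "vl");
  return 0;
}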
// The second source operand must match the destination type or be an XLen scalar. @@ -480,13 +485,14 @@ let TargetPrefix = "riscv" in { let VLOperand = 4; } // For destination vector type is NOT the same as first source vector. - // Input: (vector_in, vector_in/scalar_in, vl) + // Input: (passthru, vector_in, vector_in/scalar_in, vl) class RISCVBinaryABXNoMask : Intrinsic<[llvm_anyvector_ty], - [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty], + [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty, + llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { - let SplatOperand = 1; - let VLOperand = 2; + let SplatOperand = 2; + let VLOperand = 3; } // For destination vector type is NOT the same as first source vector (with mask). // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta) @@ -501,12 +507,13 @@ let TargetPrefix = "riscv" in { } // For destination vector type is NOT the same as first source vector. The // second source operand must match the destination type or be an XLen scalar. - // Input: (vector_in, vector_in/scalar_in, vl) + // Input: (passthru, vector_in, vector_in/scalar_in, vl) class RISCVBinaryABShiftNoMask : Intrinsic<[llvm_anyvector_ty], - [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty], + [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty, + llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { - let VLOperand = 2; + let VLOperand = 3; } // For destination vector type is NOT the same as first source vector (with mask). // The second source operand must match the destination type or be an XLen scalar. @@ -595,13 +602,14 @@ let TargetPrefix = "riscv" in { } // For Saturating binary operations. // The destination vector type is the same as first source vector. - // Input: (vector_in, vector_in/scalar_in, vl) + // Input: (passthru, vector_in, vector_in/scalar_in, vl) class RISCVSaturatingBinaryAAXNoMask : Intrinsic<[llvm_anyvector_ty], - [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty], + [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, + llvm_anyint_ty], [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic { - let SplatOperand = 1; - let VLOperand = 2; + let SplatOperand = 2; + let VLOperand = 3; } // For Saturating binary operations with mask. // The destination vector type is the same as first source vector. @@ -618,12 +626,13 @@ let TargetPrefix = "riscv" in { // For Saturating binary operations. // The destination vector type is the same as first source vector. // The second source operand matches the destination type or is an XLen scalar. - // Input: (vector_in, vector_in/scalar_in, vl) + // Input: (passthru, vector_in, vector_in/scalar_in, vl) class RISCVSaturatingBinaryAAShiftNoMask : Intrinsic<[llvm_anyvector_ty], - [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty], + [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, + llvm_anyint_ty], [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic { - let VLOperand = 2; + let VLOperand = 3; } // For Saturating binary operations with mask. // The destination vector type is the same as first source vector. @@ -640,12 +649,13 @@ let TargetPrefix = "riscv" in { // For Saturating binary operations. // The destination vector type is NOT the same as first source vector. // The second source operand matches the destination type or is an XLen scalar. 
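For the ABX (widening) classes above, the new passthru has the destination type rather than the source type, which is why it is spelled LLVMMatchType<0> while the first source stays llvm_anyvector_ty. A scalar model of what the operand means for an unmasked op, with made-up values and a hypothetical helper name: elements below vl are computed, elements at or beyond vl come from the passthru when it is a real vector (tail undisturbed) and are unspecified when it is undef (tail agnostic):

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

std::vector<int64_t> vwadd_vv_model(const std::vector<int64_t> &Passthru,
                                    const std::vector<int32_t> &Op1,
                                    const std::vector<int32_t> &Op2,
                                    size_t VL) {
  std::vector<int64_t> Result = Passthru; // tail-undisturbed baseline
  for (size_t I = 0; I < VL; ++I)          // body: widening add, SEW 32 -> 64
    Result[I] = int64_t(Op1[I]) + int64_t(Op2[I]);
  return Result;
}

int main() {
  std::vector<int64_t> Passthru = {-1, -1, -1, -1};
  std::vector<int32_t> Op1 = {1, 2, 3, 4}, Op2 = {10, 20, 30, 40};
  std::vector<int64_t> R = vwadd_vv_model(Passthru, Op1, Op2, /*VL=*/2);
  for (int64_t V : R)
    std::printf("%lld ", (long long)V); // prints: 11 22 -1 -1
  std::printf("\n");
  return 0;
}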
- // Input: (vector_in, vector_in/scalar_in, vl) + // Input: (passthru, vector_in, vector_in/scalar_in, vl) class RISCVSaturatingBinaryABShiftNoMask : Intrinsic<[llvm_anyvector_ty], - [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty], + [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty, + llvm_anyint_ty], [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic { - let VLOperand = 2; + let VLOperand = 3; } // For Saturating binary operations with mask. // The destination vector type is NOT the same as first source vector (with mask). diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp index 8ddbb072..53bdd4b 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -4475,10 +4475,12 @@ SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op, ValInVec = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, I32ContainerVT, Zero, InsertI64VL); // First slide in the hi value, then the lo in underneath it. - ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec, - ValHi, I32Mask, InsertI64VL); - ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec, - ValLo, I32Mask, InsertI64VL); + ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, + DAG.getUNDEF(I32ContainerVT), ValInVec, ValHi, + I32Mask, InsertI64VL); + ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, + DAG.getUNDEF(I32ContainerVT), ValInVec, ValLo, + I32Mask, InsertI64VL); // Bitcast back to the right container type. ValInVec = DAG.getBitcast(ContainerVT, ValInVec); } @@ -4774,8 +4776,7 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, // We need to special case these when the scalar is larger than XLen. unsigned NumOps = Op.getNumOperands(); bool IsMasked = NumOps == 7; - unsigned OpOffset = IsMasked ? 1 : 0; - SDValue Scalar = Op.getOperand(2 + OpOffset); + SDValue Scalar = Op.getOperand(3); if (Scalar.getValueType().bitsLE(XLenVT)) break; @@ -4790,7 +4791,7 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, // Convert the vector source to the equivalent nxvXi32 vector. MVT I32VT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2); - SDValue Vec = DAG.getBitcast(I32VT, Op.getOperand(1 + OpOffset)); + SDValue Vec = DAG.getBitcast(I32VT, Op.getOperand(2)); SDValue ScalarLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar, DAG.getConstant(0, DL, XLenVT)); @@ -4807,17 +4808,34 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, // Shift the two scalar parts in using SEW=32 slide1up/slide1down // instructions. 
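Context for the vslide1up/vslide1down rework that follows: on rv32 an i64 scalar cannot be passed in one GPR, so the source vector is reinterpreted as i32 elements and the scalar is slid in as two 32-bit halves, high half first for vslide1up (low half first for vslide1down). A host-side model with made-up values showing why that order reassembles the scalar in element 0; slide1up32 is an illustrative helper, not an LLVM API:

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// vslide1up at SEW=32: result[0] = scalar, result[i] = src[i - 1].
std::vector<uint32_t> slide1up32(const std::vector<uint32_t> &Src,
                                 uint32_t Scalar) {
  std::vector<uint32_t> R(Src.size());
  R[0] = Scalar;
  for (size_t I = 1; I < R.size(); ++I)
    R[I] = Src[I - 1];
  return R;
}

int main() {
  // Two i64 elements {0x1111111122222222, 0x3333333344444444}, viewed as i32
  // lanes in little-endian order.
  std::vector<uint32_t> Vec32 = {0x22222222, 0x11111111,
                                 0x44444444, 0x33333333};
  uint64_t Scalar = 0xAAAAAAAABBBBBBBBull;
  uint32_t Lo = uint32_t(Scalar), Hi = uint32_t(Scalar >> 32);

  // High half first, then low half, mirroring the vslide1up order below.
  std::vector<uint32_t> Tmp = slide1up32(Vec32, Hi);
  std::vector<uint32_t> Res = slide1up32(Tmp, Lo);

  // The first i64 element is the original 64-bit scalar, and every old
  // element has moved up by one: exactly the i64 slide1up semantics.
  uint64_t Elt0 = (uint64_t(Res[1]) << 32) | Res[0];
  std::printf("element 0 = 0x%016llx\n", (unsigned long long)Elt0);
  return 0;
}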
- if (IntNo == Intrinsic::riscv_vslide1up || - IntNo == Intrinsic::riscv_vslide1up_mask) { - Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarHi, - I32Mask, I32VL); - Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarLo, - I32Mask, I32VL); + SDValue Passthru = DAG.getBitcast(I32VT, Op.getOperand(1)); + if (!IsMasked) { + if (IntNo == Intrinsic::riscv_vslide1up) { + Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Passthru, Vec, + ScalarHi, I32Mask, I32VL); + Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Passthru, Vec, + ScalarLo, I32Mask, I32VL); + } else { + Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Passthru, Vec, + ScalarLo, I32Mask, I32VL); + Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Passthru, Vec, + ScalarHi, I32Mask, I32VL); + } } else { - Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarLo, - I32Mask, I32VL); - Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarHi, - I32Mask, I32VL); + // TODO: These VSLIDE1 nodes could be TAMA because we use vmerge to select + // the maskedoff value. + SDValue Undef = DAG.getUNDEF(I32VT); + if (IntNo == Intrinsic::riscv_vslide1up_mask) { + Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Undef, Vec, + ScalarHi, I32Mask, I32VL); + Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Undef, Vec, + ScalarLo, I32Mask, I32VL); + } else { + Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Undef, Vec, + ScalarLo, I32Mask, I32VL); + Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Undef, Vec, + ScalarHi, I32Mask, I32VL); + } } // Convert back to nxvXi64. @@ -4825,11 +4843,21 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, if (!IsMasked) return Vec; - // Apply mask after the operation. SDValue Mask = Op.getOperand(NumOps - 3); SDValue MaskedOff = Op.getOperand(1); - return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, Mask, Vec, MaskedOff, VL); + // Assume the policy operand is the last operand. + uint64_t Policy = Op.getConstantOperandVal(NumOps - 1); + // We don't need to select maskedoff if it's undef. + if (MaskedOff.isUndef()) + return Vec; + // TAMU + if (Policy == RISCVII::TAIL_AGNOSTIC) + return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, Mask, Vec, MaskedOff, + VL); + // TUMA or TUMU: Currently we always emit the tumu policy regardless of tuma. + // This is fine because vmerge does not care about the mask policy. + return DAG.getNode(RISCVISD::VP_MERGE_VL, DL, VT, Mask, Vec, MaskedOff, VL); } } diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h index 2d1da98..134d5fa 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.h +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h @@ -155,9 +155,9 @@ enum NodeType : unsigned { // and the fifth the VL. VSLIDEUP_VL, VSLIDEDOWN_VL, - // Matches the semantics of vslide1up/slide1down. The first operand is the - // source vector, the second is the XLenVT scalar value. The third and fourth - // operands are the mask and VL operands. + // Matches the semantics of vslide1up/slide1down. The first operand is the + // passthru, the second the source vector, and the third the XLenVT scalar + // value. The fourth and fifth operands are the mask and VL operands.
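For the masked i64-scalar case handled above, here is a hedged IR sketch (not from the patch; the function name and argument names are made up, and the policy encoding is read off the new masked-vslide1down-rv32.ll test below, where 0/1/2/3 correspond to tumu/tamu/tuma/tama). With a tail-agnostic policy and a non-undef maskedoff, the i64 scalar is split into two SEW=32 slides on an undef passthru and the result is then selected against the maskedoff value with vmerge; a tail-undisturbed policy goes through VP_MERGE_VL instead (a vmerge under a tu vsetvli), and an undef maskedoff skips the merge entirely.

declare <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64(
  <vscale x 1 x i64>,   ; maskedoff
  <vscale x 1 x i64>,   ; source vector
  i64,                  ; scalar to slide in
  <vscale x 1 x i1>,    ; mask
  i32,                  ; vl (i32 because this is the rv32 case)
  i32)                  ; policy: 1 = tail agnostic, mask undisturbed (tamu)

; On rv32 this lowers to two SEW=32 vslide1down.vx at doubled VL followed by
; vmerge.vvm against %maskedoff under a "ta" vsetvli (see the new test below).
define <vscale x 1 x i64> @slide1down_i64_tamu(<vscale x 1 x i64> %maskedoff,
                                               <vscale x 1 x i64> %src, i64 %s,
                                               <vscale x 1 x i1> %m, i32 %vl) {
  %r = call <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64(
              <vscale x 1 x i64> %maskedoff, <vscale x 1 x i64> %src, i64 %s,
              <vscale x 1 x i1> %m, i32 %vl, i32 1)
  ret <vscale x 1 x i64> %r
}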
VSLIDE1UP_VL, VSLIDE1DOWN_VL, // Matches the semantics of the vid.v instruction, with a mask and VL diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td index 25b0282..7254348 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -996,6 +996,24 @@ class VPseudoBinaryNoMask(PseudoToVInst.VInst); } +class VPseudoBinaryNoMaskTU : + Pseudo<(outs RetClass:$rd), + (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, ixlenimm:$sew), []>, + RISCVVPseudo { + let mayLoad = 0; + let mayStore = 0; + let hasSideEffects = 0; + let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret; + let HasVLOp = 1; + let HasSEWOp = 1; + let HasDummyMask = 1; + let HasMergeOp = 1; + let BaseInstr = !cast(PseudoToVInst.VInst); +} + // Special version of VPseudoBinaryNoMask where we pretend the first source is // tied to the destination. // This allows maskedoff and rs2 to be the same register. @@ -1017,6 +1035,25 @@ class VPseudoTiedBinaryNoMask(PseudoToVInst.VInst); } +class VPseudoTiedBinaryNoMaskTU : + Pseudo<(outs RetClass:$rd), + (ins RetClass:$merge, + Op2Class:$rs1, + AVL:$vl, ixlenimm:$sew), []>, + RISCVVPseudo { + let mayLoad = 0; + let mayStore = 0; + let hasSideEffects = 0; + let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret; + let HasVLOp = 1; + let HasSEWOp = 1; + let HasMergeOp = 0; // Merge is also rs2. + let HasDummyMask = 1; + let BaseInstr = !cast(PseudoToVInst.VInst); +} + class VPseudoIStoreNoMask LMUL, bit Ordered>: Pseudo<(outs), @@ -1652,6 +1689,8 @@ multiclass VPseudoBinary; + def "_" # MInfo.MX # "_TU" : VPseudoBinaryNoMaskTU; def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMaskPolicy, RISCVMaskedPseudo; @@ -1681,6 +1720,8 @@ multiclass VPseudoBinaryEmul; + def "_" # lmul.MX # "_" # emul.MX # "_TU": VPseudoBinaryNoMaskTU; def "_" # lmul.MX # "_" # emul.MX # "_MASK" : VPseudoBinaryMaskPolicy; } @@ -1693,6 +1734,8 @@ multiclass VPseudoTiedBinary; + def "_" # MInfo.MX # "_TIED_TU": VPseudoTiedBinaryNoMaskTU; def "_" # MInfo.MX # "_MASK_TIED" : VPseudoTiedBinaryMask; } @@ -2820,14 +2863,14 @@ class VPatUnaryAnyMask; -class VPatBinaryNoMask : +class VPatBinaryM : Pat<(result_type (!cast(intrinsic_name) (op1_type op1_reg_class:$rs1), (op2_type op2_kind:$rs2), @@ -2837,6 +2880,44 @@ class VPatBinaryNoMask; +class VPatBinaryNoMaskTA : + Pat<(result_type (!cast(intrinsic_name) + (result_type (undef)), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + VLOpFrag)), + (!cast(inst) + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + GPR:$vl, sew)>; + +class VPatBinaryNoMaskTU : + Pat<(result_type (!cast(intrinsic_name) + (result_type result_reg_class:$merge), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + VLOpFrag)), + (!cast(inst#"_TU") + (result_type result_reg_class:$merge), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + GPR:$vl, sew)>; + // Same as above but source operands are swapped. 
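A hedged sketch of what the new _TU pseudos and the TA/TU pattern pair mean at the IR level (not from the patch; the function name is made up, it assumes RV64, and the expected instructions are the ones checked by the unmasked-tu.ll test added further down): an undef passthru keeps selecting the existing tail-agnostic pseudo, while a real passthru selects the new _TU pseudo, whose destination is tied to the merge operand and which is emitted under a tu vsetvli.

declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
  <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64)

; Tail-undisturbed selection: %merge is a real value, so the _TU pseudo is used;
; unmasked-tu.ll checks for
;   vsetvli zero, a0, e8, mf8, tu, mu
;   vadd.vv v8, v9, v10
define <vscale x 1 x i8> @vadd_tu(<vscale x 1 x i8> %merge, <vscale x 1 x i8> %a,
                                  <vscale x 1 x i8> %b, i64 %vl) {
  %r = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
              <vscale x 1 x i8> %merge, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, i64 %vl)
  ret <vscale x 1 x i8> %r
}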
class VPatBinaryNoMaskSwapped : Pat<(result_type (!cast(intrinsic_name) + (result_type (undef)), (result_type result_reg_class:$rs1), (op2_type op2_kind:$rs2), VLOpFrag)), @@ -2938,6 +3020,23 @@ class VPatTiedBinaryNoMask; +class VPatTiedBinaryNoMaskTU : + Pat<(result_type (!cast(intrinsic_name) + (result_type result_reg_class:$merge), + (result_type result_reg_class:$merge), + (op2_type op2_kind:$rs2), + VLOpFrag)), + (!cast(inst#"_TIED_TU") + (result_type result_reg_class:$merge), + (op2_type op2_kind:$rs2), + GPR:$vl, sew)>; + class VPatTiedBinaryMask { GPR:$vl, mti.Log2SEW)>; } -multiclass VPatBinary { - def : VPatBinaryNoMask; + def : VPatBinaryM; def : VPatBinaryMask; @@ -3176,8 +3275,10 @@ multiclass VPatBinaryTA { - def : VPatBinaryNoMask; + def : VPatBinaryNoMaskTA; + def : VPatBinaryNoMaskTU; def : VPatBinaryMaskTA; @@ -3349,9 +3450,9 @@ multiclass VPatBinaryV_VI { foreach mti = AllMasks in - def : VPatBinaryNoMask; + def : VPatBinaryM; } multiclass VPatBinaryW_VV; - let AddedComplexity = 1 in + def : VPatBinaryNoMaskTU; + let AddedComplexity = 1 in { + def : VPatTiedBinaryNoMaskTU; def : VPatTiedBinaryMask; + } def : VPatBinaryMaskTA { multiclass VPatBinaryM_VV vtilist> { foreach vti = vtilist in - defm : VPatBinary; + defm : VPatBinaryM; } multiclass VPatBinarySwappedM_VV vtilist> { foreach vti = vtilist in { defvar kind = "V"#vti.ScalarSuffix; - defm : VPatBinary; + defm : VPatBinaryM; } } multiclass VPatBinaryM_VI vtilist> { foreach vti = vtilist in - defm : VPatBinary; + defm : VPatBinaryM; } multiclass VPatBinaryV_VV_VX_VI("PseudoVSUB_VV_"#vti.LMul.MX) vti.RegClass:$rs1, vti.RegClass:$rs2, GPR:$vl, vti.Log2SEW)>; + def : Pat<(vti.Vector (int_riscv_vrsub (vti.Vector vti.RegClass:$merge), + (vti.Vector vti.RegClass:$rs2), + (vti.Vector vti.RegClass:$rs1), + VLOpFrag)), + (!cast("PseudoVSUB_VV_"#vti.LMul.MX#"_TU") + vti.RegClass:$merge, + vti.RegClass:$rs1, + vti.RegClass:$rs2, + GPR:$vl, + vti.Log2SEW)>; def : Pat<(vti.Vector (int_riscv_vrsub_mask (vti.Vector vti.RegClass:$merge), (vti.Vector vti.RegClass:$rs2), (vti.Vector vti.RegClass:$rs1), @@ -4098,7 +4217,8 @@ foreach vti = AllIntegerVectors in { (XLenVT timm:$policy))>; // Match VSUB with a small immediate to vadd.vi by negating the immediate. - def : Pat<(vti.Vector (int_riscv_vsub (vti.Vector vti.RegClass:$rs1), + def : Pat<(vti.Vector (int_riscv_vsub (vti.Vector (undef)), + (vti.Vector vti.RegClass:$rs1), (vti.Scalar simm5_plus1:$rs2), VLOpFrag)), (!cast("PseudoVADD_VI_"#vti.LMul.MX) vti.RegClass:$rs1, @@ -4684,7 +4804,8 @@ defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsra", "PseudoVSRA", AllIntegerVectors, foreach vti = AllIntegerVectors in { // Emit shift by 1 as an add since it might be faster. 
- def : Pat<(vti.Vector (int_riscv_vsll (vti.Vector vti.RegClass:$rs1), + def : Pat<(vti.Vector (int_riscv_vsll (vti.Vector undef), + (vti.Vector vti.RegClass:$rs1), (XLenVT 1), VLOpFrag)), (!cast("PseudoVADD_VV_"#vti.LMul.MX) vti.RegClass:$rs1, vti.RegClass:$rs1, diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td index 7f56304..cefdd4d 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td @@ -1643,9 +1643,10 @@ def SDTRVVSlide : SDTypeProfile<1, 5, [ SDTCisVec<0>, SDTCisSameAs<1, 0>, SDTCisSameAs<2, 0>, SDTCisVT<3, XLenVT>, SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<0, 4>, SDTCisVT<5, XLenVT> ]>; -def SDTRVVSlide1 : SDTypeProfile<1, 4, [ - SDTCisVec<0>, SDTCisSameAs<1, 0>, SDTCisInt<0>, SDTCisVT<2, XLenVT>, - SDTCVecEltisVT<3, i1>, SDTCisSameNumEltsAs<0, 3>, SDTCisVT<4, XLenVT> +def SDTRVVSlide1 : SDTypeProfile<1, 5, [ + SDTCisVec<0>, SDTCisSameAs<1, 0>, SDTCisSameAs<2, 0>, SDTCisInt<0>, + SDTCisVT<3, XLenVT>, SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<0, 4>, + SDTCisVT<5, XLenVT> ]>; def riscv_slideup_vl : SDNode<"RISCVISD::VSLIDEUP_VL", SDTRVVSlide, []>; @@ -1660,16 +1661,30 @@ foreach vti = AllIntegerVectors in { VLOpFrag)), (!cast("PseudoVID_V_"#vti.LMul.MX) GPR:$vl, vti.Log2SEW)>; - def : Pat<(vti.Vector (riscv_slide1up_vl (vti.Vector vti.RegClass:$rs1), + def : Pat<(vti.Vector (riscv_slide1up_vl (vti.Vector undef), + (vti.Vector vti.RegClass:$rs1), GPR:$rs2, (vti.Mask true_mask), VLOpFrag)), (!cast("PseudoVSLIDE1UP_VX_"#vti.LMul.MX) vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>; - def : Pat<(vti.Vector (riscv_slide1down_vl (vti.Vector vti.RegClass:$rs1), + def : Pat<(vti.Vector (riscv_slide1up_vl (vti.Vector vti.RegClass:$rd), + (vti.Vector vti.RegClass:$rs1), GPR:$rs2, (vti.Mask true_mask), VLOpFrag)), + (!cast("PseudoVSLIDE1UP_VX_"#vti.LMul.MX#"_TU") + vti.RegClass:$rd, vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>; + def : Pat<(vti.Vector (riscv_slide1down_vl (vti.Vector undef), + (vti.Vector vti.RegClass:$rs1), + GPR:$rs2, (vti.Mask true_mask), + VLOpFrag)), (!cast("PseudoVSLIDE1DOWN_VX_"#vti.LMul.MX) vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>; + def : Pat<(vti.Vector (riscv_slide1down_vl (vti.Vector vti.RegClass:$rd), + (vti.Vector vti.RegClass:$rs1), + GPR:$rs2, (vti.Mask true_mask), + VLOpFrag)), + (!cast("PseudoVSLIDE1DOWN_VX_"#vti.LMul.MX#"_TU") + vti.RegClass:$rd, vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>; } foreach vti = !listconcat(AllIntegerVectors, AllFloatVectors) in { diff --git a/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll b/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll index 4216c59..3e370c4 100644 --- a/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll @@ -25,6 +25,7 @@ define @access_fixed_object(i64 *%val) { declare @llvm.riscv.vadd.nxv1i64.nxv1i64( , , + , i64); define @access_fixed_and_vector_objects(i64 *%val) { @@ -54,6 +55,7 @@ define @access_fixed_and_vector_objects(i64 *%val) { %len = load i64, i64* %local %a = call @llvm.riscv.vadd.nxv1i64.nxv1i64( + undef, %v1, %v2, i64 %len) diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-vslide1down-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/masked-vslide1down-rv32.ll new file mode 100644 index 0000000..0c24fcf --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/masked-vslide1down-rv32.ll @@ -0,0 +1,115 @@ +; NOTE: Assertions have been autogenerated by 
utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \ +; RUN: < %s | FileCheck %s + +declare @llvm.riscv.vslide1down.mask.nxv1i64.i64( + , + , + i64, + , + i32, + i32); + +define @intrinsic_vslide1down_mask_tumu_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vslide1down_mask_tumu_vx_nxv1i64_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: slli a3, a2, 1 +; CHECK-NEXT: vsetvli zero, a3, e32, m1, ta, mu +; CHECK-NEXT: vslide1down.vx v9, v9, a0 +; CHECK-NEXT: vslide1down.vx v9, v9, a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, tu, mu +; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vslide1down.mask.nxv1i64.i64( + %0, + %1, + i64 %2, + %3, + i32 %4, i32 0) + + ret %a +} + +define @intrinsic_vslide1down_mask_tamu_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vslide1down_mask_tamu_vx_nxv1i64_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: slli a3, a2, 1 +; CHECK-NEXT: vsetvli zero, a3, e32, m1, ta, mu +; CHECK-NEXT: vslide1down.vx v9, v9, a0 +; CHECK-NEXT: vslide1down.vx v9, v9, a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vslide1down.mask.nxv1i64.i64( + %0, + %1, + i64 %2, + %3, + i32 %4, i32 1) + + ret %a +} + + +; Fallback vslide1 to mask undisturbed until InsertVSETVLI supports mask agnostic. +define @intrinsic_vslide1down_mask_tuma_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vslide1down_mask_tuma_vx_nxv1i64_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: slli a3, a2, 1 +; CHECK-NEXT: vsetvli zero, a3, e32, m1, ta, mu +; CHECK-NEXT: vslide1down.vx v9, v9, a0 +; CHECK-NEXT: vslide1down.vx v9, v9, a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, tu, mu +; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vslide1down.mask.nxv1i64.i64( + %0, + %1, + i64 %2, + %3, + i32 %4, i32 2) + + ret %a +} + +; Fallback vslide1 to mask undisturbed until InsertVSETVLI supports mask agnostic. 
+define @intrinsic_vslide1down_mask_tama_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vslide1down_mask_tama_vx_nxv1i64_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: slli a2, a2, 1 +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vslide1down.vx v8, v8, a0 +; CHECK-NEXT: vslide1down.vx v8, v8, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vslide1down.mask.nxv1i64.i64( + undef, + %0, + i64 %1, + %2, + i32 %3, i32 3) + + ret %a +} + +define @intrinsic_vslide1down_mask_tama_undef_mask_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vslide1down_mask_tama_undef_mask_vx_nxv1i64_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: slli a2, a2, 1 +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vslide1down.vx v8, v8, a0 +; CHECK-NEXT: vslide1down.vx v8, v8, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vslide1down.mask.nxv1i64.i64( + undef, + %0, + i64 %1, + undef, + i32 %2, i32 3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll index 314c412..1fe6141 100644 --- a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll @@ -79,11 +79,11 @@ define @foo( %a, @llvm.riscv.vfadd.nxv1f64.nxv1f64( %a, %b, i32 %gvl) + %x = call @llvm.riscv.vfadd.nxv1f64.nxv1f64( undef, %a, %b, i32 %gvl) %call = call signext i32 @puts(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str, i64 0, i64 0)) - %z = call @llvm.riscv.vfadd.nxv1f64.nxv1f64( %a, %x, i32 %gvl) + %z = call @llvm.riscv.vfadd.nxv1f64.nxv1f64( undef, %a, %x, i32 %gvl) ret %z } -declare @llvm.riscv.vfadd.nxv1f64.nxv1f64( %a, %b, i32 %gvl) +declare @llvm.riscv.vfadd.nxv1f64.nxv1f64( %passthru, %a, %b, i32 %gvl) declare i32 @puts(i8*); diff --git a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll index c2e44e7..453aabd 100644 --- a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll @@ -79,11 +79,11 @@ define @foo( %a, @llvm.riscv.vfadd.nxv1f64.nxv1f64( %a, %b, i64 %gvl) + %x = call @llvm.riscv.vfadd.nxv1f64.nxv1f64( undef, %a, %b, i64 %gvl) %call = call signext i32 @puts(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str, i64 0, i64 0)) - %z = call @llvm.riscv.vfadd.nxv1f64.nxv1f64( %a, %x, i64 %gvl) + %z = call @llvm.riscv.vfadd.nxv1f64.nxv1f64( undef, %a, %x, i64 %gvl) ret %z } -declare @llvm.riscv.vfadd.nxv1f64.nxv1f64( %a, %b, i64 %gvl) +declare @llvm.riscv.vfadd.nxv1f64.nxv1f64( %passthru, %a, %b, i64 %gvl) declare i32 @puts(i8*); diff --git a/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll b/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll index 3d314b1..21bd0ee 100644 --- a/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll @@ -1,8 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ -; RUN: -verify-machineinstrs | FileCheck %s --check-prefix=RV32 -; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ -; RUN: -verify-machineinstrs | FileCheck %s --check-prefix=RV64 +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s --check-prefix=RV32 +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs 
-target-abi=lp64d | FileCheck %s --check-prefix=RV64 declare @llvm.riscv.vle.nxv1i8( , @@ -118,3 +118,2088 @@ entry: ret %a } + +declare @llvm.riscv.vaadd.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vaadd.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vaadd.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vaadd.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vaaddu.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vaaddu.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vaaddu.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vaaddu.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vadd.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vadd.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vadd.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} +declare @llvm.riscv.vand.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vand.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vand.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vand.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vasub.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vasub.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vasub.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vasub.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vasubu.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vasubu.vv v8, v9, v10 +; RV32-NEXT: ret +; 
+; RV64-LABEL: intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vasubu.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vasubu.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vdiv.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vdiv.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vdiv.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vdivu.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vdivu.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vdivu.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv1f16.nxv1f16( + , + , + , + iXLen); + +define @intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfadd.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfadd.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv1f16.nxv1f16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfdiv.nxv1f16.nxv1f16( + , + , + , + iXLen); + +define @intrinsic_vfdiv_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfdiv_vv_nxv1f16_nxv1f16_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfdiv.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfdiv_vv_nxv1f16_nxv1f16_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfdiv.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfdiv.nxv1f16.nxv1f16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv1f16.nxv1f16( + , + , + , + iXLen); + +define @intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfmax.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfmax.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfmax.nxv1f16.nxv1f16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv1f16.nxv1f16( + , + , + , + iXLen); + +define @intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, 
iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfmin.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfmin.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.nxv1f16.nxv1f16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv1f16.nxv1f16( + , + , + , + iXLen); + +define @intrinsic_vfmul_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfmul_vv_nxv1f16_nxv1f16_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfmul.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfmul_vv_nxv1f16_nxv1f16_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfmul.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.nxv1f16.nxv1f16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfrdiv.nxv1f16.f16( + , + , + half, + iXLen); + +define @intrinsic_vfrdiv_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfrdiv_vf_nxv1f16_nxv1f16_f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfrdiv.vf v8, v9, fa0 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfrdiv_vf_nxv1f16_nxv1f16_f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfrdiv.vf v8, v9, fa0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfrdiv.nxv1f16.f16( + %0, + %1, + half %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfsgnj.nxv1f16.nxv1f16( + , + , + , + iXLen); + +define @intrinsic_vfsgnj_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfsgnj_vv_nxv1f16_nxv1f16_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfsgnj.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfsgnj_vv_nxv1f16_nxv1f16_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfsgnj.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnj.nxv1f16.nxv1f16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16( + , + , + , + iXLen); + +define @intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfsgnjn.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfsgnjn.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16( + , + , + , + iXLen); + +define @intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfsgnjx.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; 
RV64-NEXT: vfsgnjx.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfrsub.nxv1f16.f16( + , + , + half, + iXLen); + +define @intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfrsub.vf v8, v9, fa0 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfrsub.vf v8, v9, fa0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfrsub.nxv1f16.f16( + %0, + %1, + half %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfslide1down.nxv1f16.f16( + , + , + half, + iXLen); + +define @intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfslide1down.vf v8, v9, fa0 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfslide1down.vf v8, v9, fa0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfslide1down.nxv1f16.f16( + %0, + %1, + half %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfslide1up.nxv1f16.f16( + , + , + half, + iXLen); + +define @intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfslide1up.vf v8, v9, fa0 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfslide1up.vf v8, v9, fa0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfslide1up.nxv1f16.f16( + %0, + %1, + half %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16( + , + , + , + iXLen); + +define @intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfwsub.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfwsub.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16( + , + , + , + iXLen); + +define @intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfwsub.wv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfwsub.wv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16( + , + , + , + iXLen); + +define 
@intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vl4re16.v v24, (a0) +; RV32-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; RV32-NEXT: vfwsub.wv v8, v16, v24 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vl4re16.v v24, (a0) +; RV64-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; RV64-NEXT: vfwsub.wv v8, v16, v24 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16( + , + , + , + iXLen); + +define @intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfwmul.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfwmul.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16( + , + , + , + iXLen); + +define @intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfwadd.wv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfwadd.wv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16( + , + , + , + iXLen); + +define @intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfwadd.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfwadd.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv1f16.nxv1f16( + , + , + , + iXLen); + +define @intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfsub.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfsub.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfsub.nxv1f16.nxv1f16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + + +declare @llvm.riscv.vslide1down.nxv1i64( + , + , + i64, + iXLen); + +define @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: slli a2, a2, 1 +; RV32-NEXT: vsetvli 
zero, a2, e32, m1, tu, mu +; RV32-NEXT: vmv1r.v v10, v8 +; RV32-NEXT: vslide1down.vx v10, v9, a0 +; RV32-NEXT: vslide1down.vx v8, v10, a1 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; RV64-NEXT: vslide1down.vx v8, v9, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vslide1down.nxv1i64( + %0, + %1, + i64 %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vslide1up.nxv1i64.i64( + , + , + i64, + iXLen); + +define @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: slli a2, a2, 1 +; RV32-NEXT: vsetvli zero, a2, e32, m1, tu, mu +; RV32-NEXT: vmv1r.v v10, v8 +; RV32-NEXT: vslide1up.vx v10, v9, a1 +; RV32-NEXT: vslide1up.vx v8, v10, a0 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; RV64-NEXT: vslide1up.vx v8, v9, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vslide1up.nxv1i64.i64( + %0, + %1, + i64 %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vmax.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vmax.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vmax.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vmax.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vmaxu.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vmaxu.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vmaxu.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vmaxu.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vmin.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vmin.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vmin.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vmin.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vminu.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vminu.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vminu.vv v8, v9, v10 +; 
RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vminu.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vmul.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vmul_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmul_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vmul.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmul_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vmul.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vmul.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vmulh_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmulh_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vmulh.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmulh_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vmulh.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vmulh.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vmulhsu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmulhsu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vmulhsu.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmulhsu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vmulhsu.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vmulhsu.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vmulhu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmulhu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vmulhu.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmulhu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vmulhu.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vmulhu.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vnclip.wv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vnclip.wv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; 
RV32-NEXT: vnclipu.wv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vnclipu.wv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vnsra_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vnsra_wv_nxv1i8_nxv1i16_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vnsra.wv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vnsra_wv_nxv1i8_nxv1i16_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vnsra.wv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vnsrl_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vnsrl_wv_nxv1i8_nxv1i16_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vnsrl.wv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vnsrl_wv_nxv1i8_nxv1i16_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vnsrl.wv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vor.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vor.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vor.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vor.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vrem.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vrem.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vrem.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vrem.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vremu.nxv1i8.nxv1i8( + , + , + , + iXLen); +declare @llvm.riscv.vrgather.vv.nxv1i8.i32( + , + , + , + iXLen); + +define @intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vrgather.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vrgather.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vrgather.vv.nxv1i8.i32( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + 
+declare @llvm.riscv.vrgather.vx.nxv1i8( + , + , + iXLen, + iXLen); + +define @intrinsic_vrgather_vx_nxv1i8_nxv1i8( %0, %1, iXLen %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vrgather_vx_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; RV32-NEXT: vrgather.vx v8, v9, a0 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vrgather_vx_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; RV64-NEXT: vrgather.vx v8, v9, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vrgather.vx.nxv1i8( + %0, + %1, + iXLen %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vrgatherei16.vv.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vrgatherei16_vv_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vrgatherei16_vv_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vrgatherei16.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vrgatherei16_vv_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vrgatherei16.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vrgatherei16.vv.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vrsub.nxv1i64.i64( + , + , + i64, + iXLen); + +define @intrinsic_vrsub_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vrsub_vx_nxv1i64_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; RV32-NEXT: vsub.vv v8, v10, v9 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vrsub_vx_nxv1i64_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; RV64-NEXT: vrsub.vx v8, v9, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vrsub.nxv1i64.i64( + %0, + %1, + i64 %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv1i64.i64( + , + , + i64, + iXLen); + +define @intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; RV32-NEXT: vsadd.vv v8, v9, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; RV64-NEXT: vsadd.vx v8, v9, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vsadd.nxv1i64.i64( + %0, + %1, + i64 %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vsaddu.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vsaddu.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call 
@llvm.riscv.vsaddu.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vsll.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vsll.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vsll.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vsll.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vsmul.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vsmul.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vsmul.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vsmul.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vsmul.nxv1i64.i64( + , + , + i64, + iXLen); + +define @intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; RV32-NEXT: vsmul.vv v8, v9, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; RV64-NEXT: vsmul.vx v8, v9, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vsmul.nxv1i64.i64( + %0, + %1, + i64 %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vsra.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vsra_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vsra_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vsra.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vsra_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vsra.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vsra.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} +declare @llvm.riscv.vsrl.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vsrl_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vsrl_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vsrl.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vsrl_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vsrl.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vsrl.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vssra.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: 
intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vssra.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vssra.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vssra.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vssrl.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vssrl.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vssrl.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vssrl.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vssub.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vssub.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vssub.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vssub.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vssubu.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vssubu.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vssubu.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vssub.nxv1i64.i64( + , + , + i64, + iXLen); + +define @intrinsic_vssub_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vssub_vx_nxv1i64_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; RV32-NEXT: vssub.vv v8, v9, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vssub_vx_nxv1i64_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; RV64-NEXT: vssub.vx v8, v9, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vssub.nxv1i64.i64( + %0, + %1, + i64 %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv1i64.i64( + , + , + i64, + iXLen); + +define @intrinsic_vssubu_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vssubu_vx_nxv1i64_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; 
RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; RV32-NEXT: vssubu.vv v8, v9, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vssubu_vx_nxv1i64_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; RV64-NEXT: vssubu.vx v8, v9, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vssubu.nxv1i64.i64( + %0, + %1, + i64 %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vsub.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vsub_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vsub_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vsub.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vsub_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vsub.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vsub.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vwadd.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vwadd.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vwadd.w.nxv1i16.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vwadd.wv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vwadd.wv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vwadd.w.nxv1i16.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vwaddu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vwaddu_vv_nxv1i16_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vwaddu.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vwaddu_vv_nxv1i16_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vwaddu.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vwmul.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vwmul.vv v8, v9, v10 +; RV64-NEXT: ret 
+entry: + %a = call @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vwmulu.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vwmulu.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vwmulsu.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vwmulsu.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vwsub.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vwsub.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vwsub.w.nxv1i16.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vwsub.wv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vwsub.wv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vwsub.w.nxv1i16.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +define @intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8_tied( %0, %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8_tied: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vwsub.wv v8, v8, v9 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8_tied: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vwsub.wv v8, v8, v9 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vwsub.w.nxv1i16.nxv1i8( + %0, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; 
RV32-NEXT: vwsubu.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vwsubu.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vwsubu.wv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vwsubu.wv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vxor.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vxor.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vxor.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vxor.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll index 13eb36d..2d9f005 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll @@ -4,6 +4,7 @@ declare @llvm.riscv.vaadd.nxv1i8.nxv1i8( , , + , i32); define @intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { @@ -14,6 +15,7 @@ define @intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vaadd.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -49,6 +51,7 @@ entry: declare @llvm.riscv.vaadd.nxv2i8.nxv2i8( , , + , i32); define @intrinsic_vaadd_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { @@ -59,6 +62,7 @@ define @intrinsic_vaadd_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vaadd.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -94,6 +98,7 @@ entry: declare @llvm.riscv.vaadd.nxv4i8.nxv4i8( , , + , i32); define @intrinsic_vaadd_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { @@ -104,6 +109,7 @@ define @intrinsic_vaadd_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vaadd.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -139,6 +145,7 @@ entry: declare @llvm.riscv.vaadd.nxv8i8.nxv8i8( , , + , i32); define @intrinsic_vaadd_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { @@ -149,6 +156,7 @@ define @intrinsic_vaadd_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vaadd.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -184,6 +192,7 @@ entry: declare @llvm.riscv.vaadd.nxv16i8.nxv16i8( , , + , i32); define @intrinsic_vaadd_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { @@ -194,6 +203,7 @@ define @intrinsic_vaadd_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vaadd.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -229,6 +239,7 @@ entry: declare @llvm.riscv.vaadd.nxv32i8.nxv32i8( , , + , i32); define @intrinsic_vaadd_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { @@ -239,6 +250,7 @@ define @intrinsic_vaadd_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vaadd.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -274,6 
+286,7 @@ entry: declare @llvm.riscv.vaadd.nxv64i8.nxv64i8( , , + , i32); define @intrinsic_vaadd_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { @@ -284,6 +297,7 @@ define @intrinsic_vaadd_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vaadd.nxv64i8.nxv64i8( + undef, %0, %1, i32 %2) @@ -320,6 +334,7 @@ entry: declare @llvm.riscv.vaadd.nxv1i16.nxv1i16( , , + , i32); define @intrinsic_vaadd_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { @@ -330,6 +345,7 @@ define @intrinsic_vaadd_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vaadd.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -365,6 +381,7 @@ entry: declare @llvm.riscv.vaadd.nxv2i16.nxv2i16( , , + , i32); define @intrinsic_vaadd_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { @@ -375,6 +392,7 @@ define @intrinsic_vaadd_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vaadd.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -410,6 +428,7 @@ entry: declare @llvm.riscv.vaadd.nxv4i16.nxv4i16( , , + , i32); define @intrinsic_vaadd_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { @@ -420,6 +439,7 @@ define @intrinsic_vaadd_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vaadd.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -455,6 +475,7 @@ entry: declare @llvm.riscv.vaadd.nxv8i16.nxv8i16( , , + , i32); define @intrinsic_vaadd_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { @@ -465,6 +486,7 @@ define @intrinsic_vaadd_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vaadd.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vaadd.nxv16i16.nxv16i16( , , + , i32); define @intrinsic_vaadd_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { @@ -510,6 +533,7 @@ define @intrinsic_vaadd_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vaadd.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vaadd.nxv32i16.nxv32i16( , , + , i32); define @intrinsic_vaadd_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { @@ -555,6 +580,7 @@ define @intrinsic_vaadd_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vaadd.nxv32i16.nxv32i16( + undef, %0, %1, i32 %2) @@ -591,6 +617,7 @@ entry: declare @llvm.riscv.vaadd.nxv1i32.nxv1i32( , , + , i32); define @intrinsic_vaadd_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { @@ -601,6 +628,7 @@ define @intrinsic_vaadd_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vaadd.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -636,6 +664,7 @@ entry: declare @llvm.riscv.vaadd.nxv2i32.nxv2i32( , , + , i32); define @intrinsic_vaadd_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { @@ -646,6 +675,7 @@ define @intrinsic_vaadd_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vaadd.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vaadd.nxv4i32.nxv4i32( , , + , i32); define @intrinsic_vaadd_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { @@ -691,6 +722,7 @@ define @intrinsic_vaadd_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vaadd.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vaadd.nxv8i32.nxv8i32( , , + , i32); define @intrinsic_vaadd_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { @@ -736,6 +769,7 @@ define @intrinsic_vaadd_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vaadd.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vaadd.nxv16i32.nxv16i32( , , + , i32); define @intrinsic_vaadd_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { @@ -781,6 +816,7 @@ define @intrinsic_vaadd_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vaadd.nxv16i32.nxv16i32( + undef, %0, %1, 
i32 %2) @@ -817,6 +853,7 @@ entry: declare @llvm.riscv.vaadd.nxv1i64.nxv1i64( , , + , i32); define @intrinsic_vaadd_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { @@ -827,6 +864,7 @@ define @intrinsic_vaadd_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vaadd.nxv1i64.nxv1i64( + undef, %0, %1, i32 %2) @@ -862,6 +900,7 @@ entry: declare @llvm.riscv.vaadd.nxv2i64.nxv2i64( , , + , i32); define @intrinsic_vaadd_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { @@ -872,6 +911,7 @@ define @intrinsic_vaadd_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vaadd.nxv2i64.nxv2i64( + undef, %0, %1, i32 %2) @@ -907,6 +947,7 @@ entry: declare @llvm.riscv.vaadd.nxv4i64.nxv4i64( , , + , i32); define @intrinsic_vaadd_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { @@ -917,6 +958,7 @@ define @intrinsic_vaadd_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vaadd.nxv4i64.nxv4i64( + undef, %0, %1, i32 %2) @@ -952,6 +994,7 @@ entry: declare @llvm.riscv.vaadd.nxv8i64.nxv8i64( , , + , i32); define @intrinsic_vaadd_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { @@ -962,6 +1005,7 @@ define @intrinsic_vaadd_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vaadd.nxv8i64.nxv8i64( + undef, %0, %1, i32 %2) @@ -997,6 +1041,7 @@ entry: declare @llvm.riscv.vaadd.nxv1i8.i8( , + , i8, i32); @@ -1008,6 +1053,7 @@ define @intrinsic_vaadd_vx_nxv1i8_nxv1i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1042,6 +1088,7 @@ entry: declare @llvm.riscv.vaadd.nxv2i8.i8( , + , i8, i32); @@ -1053,6 +1100,7 @@ define @intrinsic_vaadd_vx_nxv2i8_nxv2i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1087,6 +1135,7 @@ entry: declare @llvm.riscv.vaadd.nxv4i8.i8( , + , i8, i32); @@ -1098,6 +1147,7 @@ define @intrinsic_vaadd_vx_nxv4i8_nxv4i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1132,6 +1182,7 @@ entry: declare @llvm.riscv.vaadd.nxv8i8.i8( , + , i8, i32); @@ -1143,6 +1194,7 @@ define @intrinsic_vaadd_vx_nxv8i8_nxv8i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1177,6 +1229,7 @@ entry: declare @llvm.riscv.vaadd.nxv16i8.i8( , + , i8, i32); @@ -1188,6 +1241,7 @@ define @intrinsic_vaadd_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vaadd.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1222,6 +1276,7 @@ entry: declare @llvm.riscv.vaadd.nxv32i8.i8( , + , i8, i32); @@ -1233,6 +1288,7 @@ define @intrinsic_vaadd_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vaadd.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1267,6 +1323,7 @@ entry: declare @llvm.riscv.vaadd.nxv64i8.i8( , + , i8, i32); @@ -1278,6 +1335,7 @@ define @intrinsic_vaadd_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vaadd.nxv64i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1312,6 +1370,7 @@ entry: declare @llvm.riscv.vaadd.nxv1i16.i16( , + , i16, i32); @@ -1323,6 +1382,7 @@ define @intrinsic_vaadd_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vaadd.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1357,6 +1417,7 @@ entry: declare @llvm.riscv.vaadd.nxv2i16.i16( , + , i16, i32); @@ -1368,6 +1429,7 @@ define @intrinsic_vaadd_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vaadd.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1402,6 +1464,7 @@ entry: declare @llvm.riscv.vaadd.nxv4i16.i16( , + , i16, i32); @@ -1413,6 +1476,7 @@ define @intrinsic_vaadd_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vaadd.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1447,6 +1511,7 @@ entry: declare @llvm.riscv.vaadd.nxv8i16.i16( , + , i16, i32); @@ -1458,6 +1523,7 @@ define 
@intrinsic_vaadd_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vaadd.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1492,6 +1558,7 @@ entry: declare @llvm.riscv.vaadd.nxv16i16.i16( , + , i16, i32); @@ -1503,6 +1570,7 @@ define @intrinsic_vaadd_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vaadd.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1537,6 +1605,7 @@ entry: declare @llvm.riscv.vaadd.nxv32i16.i16( , + , i16, i32); @@ -1548,6 +1617,7 @@ define @intrinsic_vaadd_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vaadd.nxv32i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1582,6 +1652,7 @@ entry: declare @llvm.riscv.vaadd.nxv1i32.i32( , + , i32, i32); @@ -1593,6 +1664,7 @@ define @intrinsic_vaadd_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vaadd.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1627,6 +1699,7 @@ entry: declare @llvm.riscv.vaadd.nxv2i32.i32( , + , i32, i32); @@ -1638,6 +1711,7 @@ define @intrinsic_vaadd_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vaadd.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1672,6 +1746,7 @@ entry: declare @llvm.riscv.vaadd.nxv4i32.i32( , + , i32, i32); @@ -1683,6 +1758,7 @@ define @intrinsic_vaadd_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vaadd.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1717,6 +1793,7 @@ entry: declare @llvm.riscv.vaadd.nxv8i32.i32( , + , i32, i32); @@ -1728,6 +1805,7 @@ define @intrinsic_vaadd_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vaadd.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1762,6 +1840,7 @@ entry: declare @llvm.riscv.vaadd.nxv16i32.i32( , + , i32, i32); @@ -1773,6 +1852,7 @@ define @intrinsic_vaadd_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vaadd.nxv16i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1807,6 +1887,7 @@ entry: declare @llvm.riscv.vaadd.nxv1i64.i64( , + , i64, i32); @@ -1824,6 +1905,7 @@ define @intrinsic_vaadd_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vaadd.nxv1i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1864,6 +1946,7 @@ entry: declare @llvm.riscv.vaadd.nxv2i64.i64( , + , i64, i32); @@ -1881,6 +1964,7 @@ define @intrinsic_vaadd_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vaadd.nxv2i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1921,6 +2005,7 @@ entry: declare @llvm.riscv.vaadd.nxv4i64.i64( , + , i64, i32); @@ -1938,6 +2023,7 @@ define @intrinsic_vaadd_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vaadd.nxv4i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1978,6 +2064,7 @@ entry: declare @llvm.riscv.vaadd.nxv8i64.i64( , + , i64, i32); @@ -1995,6 +2082,7 @@ define @intrinsic_vaadd_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vaadd.nxv8i64.i64( + undef, %0, i64 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vaadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vaadd-rv64.ll index 4d692eb..76d556d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vaadd-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vaadd-rv64.ll @@ -4,6 +4,7 @@ declare @llvm.riscv.vaadd.nxv1i8.nxv1i8( , , + , i64); define @intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { @@ -14,6 +15,7 @@ define @intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vaadd.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -49,6 +51,7 @@ entry: declare @llvm.riscv.vaadd.nxv2i8.nxv2i8( , , + , i64); define @intrinsic_vaadd_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { @@ -59,6 +62,7 @@ define @intrinsic_vaadd_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vaadd.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -94,6 +98,7 @@ entry: declare @llvm.riscv.vaadd.nxv4i8.nxv4i8( , , + , i64); define @intrinsic_vaadd_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { @@ -104,6 +109,7 @@ define @intrinsic_vaadd_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vaadd.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -139,6 
+145,7 @@ entry: declare @llvm.riscv.vaadd.nxv8i8.nxv8i8( , , + , i64); define @intrinsic_vaadd_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { @@ -149,6 +156,7 @@ define @intrinsic_vaadd_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vaadd.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -184,6 +192,7 @@ entry: declare @llvm.riscv.vaadd.nxv16i8.nxv16i8( , , + , i64); define @intrinsic_vaadd_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { @@ -194,6 +203,7 @@ define @intrinsic_vaadd_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vaadd.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -229,6 +239,7 @@ entry: declare @llvm.riscv.vaadd.nxv32i8.nxv32i8( , , + , i64); define @intrinsic_vaadd_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { @@ -239,6 +250,7 @@ define @intrinsic_vaadd_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vaadd.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -274,6 +286,7 @@ entry: declare @llvm.riscv.vaadd.nxv64i8.nxv64i8( , , + , i64); define @intrinsic_vaadd_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { @@ -284,6 +297,7 @@ define @intrinsic_vaadd_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vaadd.nxv64i8.nxv64i8( + undef, %0, %1, i64 %2) @@ -320,6 +334,7 @@ entry: declare @llvm.riscv.vaadd.nxv1i16.nxv1i16( , , + , i64); define @intrinsic_vaadd_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { @@ -330,6 +345,7 @@ define @intrinsic_vaadd_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vaadd.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -365,6 +381,7 @@ entry: declare @llvm.riscv.vaadd.nxv2i16.nxv2i16( , , + , i64); define @intrinsic_vaadd_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { @@ -375,6 +392,7 @@ define @intrinsic_vaadd_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vaadd.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -410,6 +428,7 @@ entry: declare @llvm.riscv.vaadd.nxv4i16.nxv4i16( , , + , i64); define @intrinsic_vaadd_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { @@ -420,6 +439,7 @@ define @intrinsic_vaadd_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vaadd.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -455,6 +475,7 @@ entry: declare @llvm.riscv.vaadd.nxv8i16.nxv8i16( , , + , i64); define @intrinsic_vaadd_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { @@ -465,6 +486,7 @@ define @intrinsic_vaadd_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vaadd.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vaadd.nxv16i16.nxv16i16( , , + , i64); define @intrinsic_vaadd_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { @@ -510,6 +533,7 @@ define @intrinsic_vaadd_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vaadd.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vaadd.nxv32i16.nxv32i16( , , + , i64); define @intrinsic_vaadd_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { @@ -555,6 +580,7 @@ define @intrinsic_vaadd_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vaadd.nxv32i16.nxv32i16( + undef, %0, %1, i64 %2) @@ -591,6 +617,7 @@ entry: declare @llvm.riscv.vaadd.nxv1i32.nxv1i32( , , + , i64); define @intrinsic_vaadd_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { @@ -601,6 +628,7 @@ define @intrinsic_vaadd_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vaadd.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -636,6 +664,7 @@ entry: declare @llvm.riscv.vaadd.nxv2i32.nxv2i32( , , + , i64); define @intrinsic_vaadd_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { @@ -646,6 +675,7 @@ define @intrinsic_vaadd_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vaadd.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -681,6 
+711,7 @@ entry: declare @llvm.riscv.vaadd.nxv4i32.nxv4i32( , , + , i64); define @intrinsic_vaadd_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { @@ -691,6 +722,7 @@ define @intrinsic_vaadd_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vaadd.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vaadd.nxv8i32.nxv8i32( , , + , i64); define @intrinsic_vaadd_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { @@ -736,6 +769,7 @@ define @intrinsic_vaadd_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vaadd.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vaadd.nxv16i32.nxv16i32( , , + , i64); define @intrinsic_vaadd_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { @@ -781,6 +816,7 @@ define @intrinsic_vaadd_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vaadd.nxv16i32.nxv16i32( + undef, %0, %1, i64 %2) @@ -817,6 +853,7 @@ entry: declare @llvm.riscv.vaadd.nxv1i64.nxv1i64( , , + , i64); define @intrinsic_vaadd_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { @@ -827,6 +864,7 @@ define @intrinsic_vaadd_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vaadd.nxv1i64.nxv1i64( + undef, %0, %1, i64 %2) @@ -862,6 +900,7 @@ entry: declare @llvm.riscv.vaadd.nxv2i64.nxv2i64( , , + , i64); define @intrinsic_vaadd_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { @@ -872,6 +911,7 @@ define @intrinsic_vaadd_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vaadd.nxv2i64.nxv2i64( + undef, %0, %1, i64 %2) @@ -907,6 +947,7 @@ entry: declare @llvm.riscv.vaadd.nxv4i64.nxv4i64( , , + , i64); define @intrinsic_vaadd_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { @@ -917,6 +958,7 @@ define @intrinsic_vaadd_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vaadd.nxv4i64.nxv4i64( + undef, %0, %1, i64 %2) @@ -952,6 +994,7 @@ entry: declare @llvm.riscv.vaadd.nxv8i64.nxv8i64( , , + , i64); define @intrinsic_vaadd_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { @@ -962,6 +1005,7 @@ define @intrinsic_vaadd_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vaadd.nxv8i64.nxv8i64( + undef, %0, %1, i64 %2) @@ -997,6 +1041,7 @@ entry: declare @llvm.riscv.vaadd.nxv1i8.i8( , + , i8, i64); @@ -1008,6 +1053,7 @@ define @intrinsic_vaadd_vx_nxv1i8_nxv1i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1042,6 +1088,7 @@ entry: declare @llvm.riscv.vaadd.nxv2i8.i8( , + , i8, i64); @@ -1053,6 +1100,7 @@ define @intrinsic_vaadd_vx_nxv2i8_nxv2i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1087,6 +1135,7 @@ entry: declare @llvm.riscv.vaadd.nxv4i8.i8( , + , i8, i64); @@ -1098,6 +1147,7 @@ define @intrinsic_vaadd_vx_nxv4i8_nxv4i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1132,6 +1182,7 @@ entry: declare @llvm.riscv.vaadd.nxv8i8.i8( , + , i8, i64); @@ -1143,6 +1194,7 @@ define @intrinsic_vaadd_vx_nxv8i8_nxv8i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1177,6 +1229,7 @@ entry: declare @llvm.riscv.vaadd.nxv16i8.i8( , + , i8, i64); @@ -1188,6 +1241,7 @@ define @intrinsic_vaadd_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vaadd.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1222,6 +1276,7 @@ entry: declare @llvm.riscv.vaadd.nxv32i8.i8( , + , i8, i64); @@ -1233,6 +1288,7 @@ define @intrinsic_vaadd_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vaadd.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1267,6 +1323,7 @@ entry: declare @llvm.riscv.vaadd.nxv64i8.i8( , + , i8, i64); @@ -1278,6 +1335,7 @@ 
define @intrinsic_vaadd_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vaadd.nxv64i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1312,6 +1370,7 @@ entry: declare @llvm.riscv.vaadd.nxv1i16.i16( , + , i16, i64); @@ -1323,6 +1382,7 @@ define @intrinsic_vaadd_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vaadd.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1357,6 +1417,7 @@ entry: declare @llvm.riscv.vaadd.nxv2i16.i16( , + , i16, i64); @@ -1368,6 +1429,7 @@ define @intrinsic_vaadd_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vaadd.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1402,6 +1464,7 @@ entry: declare @llvm.riscv.vaadd.nxv4i16.i16( , + , i16, i64); @@ -1413,6 +1476,7 @@ define @intrinsic_vaadd_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vaadd.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1447,6 +1511,7 @@ entry: declare @llvm.riscv.vaadd.nxv8i16.i16( , + , i16, i64); @@ -1458,6 +1523,7 @@ define @intrinsic_vaadd_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vaadd.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1492,6 +1558,7 @@ entry: declare @llvm.riscv.vaadd.nxv16i16.i16( , + , i16, i64); @@ -1503,6 +1570,7 @@ define @intrinsic_vaadd_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vaadd.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1537,6 +1605,7 @@ entry: declare @llvm.riscv.vaadd.nxv32i16.i16( , + , i16, i64); @@ -1548,6 +1617,7 @@ define @intrinsic_vaadd_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vaadd.nxv32i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1582,6 +1652,7 @@ entry: declare @llvm.riscv.vaadd.nxv1i32.i32( , + , i32, i64); @@ -1593,6 +1664,7 @@ define @intrinsic_vaadd_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vaadd.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1627,6 +1699,7 @@ entry: declare @llvm.riscv.vaadd.nxv2i32.i32( , + , i32, i64); @@ -1638,6 +1711,7 @@ define @intrinsic_vaadd_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vaadd.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1672,6 +1746,7 @@ entry: declare @llvm.riscv.vaadd.nxv4i32.i32( , + , i32, i64); @@ -1683,6 +1758,7 @@ define @intrinsic_vaadd_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vaadd.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1717,6 +1793,7 @@ entry: declare @llvm.riscv.vaadd.nxv8i32.i32( , + , i32, i64); @@ -1728,6 +1805,7 @@ define @intrinsic_vaadd_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vaadd.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1762,6 +1840,7 @@ entry: declare @llvm.riscv.vaadd.nxv16i32.i32( , + , i32, i64); @@ -1773,6 +1852,7 @@ define @intrinsic_vaadd_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vaadd.nxv16i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1807,6 +1887,7 @@ entry: declare @llvm.riscv.vaadd.nxv1i64.i64( , + , i64, i64); @@ -1818,6 +1899,7 @@ define @intrinsic_vaadd_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vaadd.nxv1i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1852,6 +1934,7 @@ entry: declare @llvm.riscv.vaadd.nxv2i64.i64( , + , i64, i64); @@ -1863,6 +1946,7 @@ define @intrinsic_vaadd_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vaadd.nxv2i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1897,6 +1981,7 @@ entry: declare @llvm.riscv.vaadd.nxv4i64.i64( , + , i64, i64); @@ -1908,6 +1993,7 @@ define @intrinsic_vaadd_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vaadd.nxv4i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1942,6 +2028,7 @@ entry: declare @llvm.riscv.vaadd.nxv8i64.i64( , + , i64, i64); @@ -1953,6 +2040,7 @@ define @intrinsic_vaadd_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vaadd.nxv8i64.i64( + undef, %0, i64 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll index 4e796ebd..5d8b24e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll @@ -4,6 +4,7 @@ declare @llvm.riscv.vaaddu.nxv1i8.nxv1i8( , , + , i32); define @intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { @@ -14,6 +15,7 @@ define @intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vaaddu.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -49,6 +51,7 @@ entry: declare @llvm.riscv.vaaddu.nxv2i8.nxv2i8( , , + , i32); define @intrinsic_vaaddu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { @@ -59,6 +62,7 @@ define @intrinsic_vaaddu_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vaaddu.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -94,6 +98,7 @@ entry: declare @llvm.riscv.vaaddu.nxv4i8.nxv4i8( , , + , i32); define @intrinsic_vaaddu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { @@ -104,6 +109,7 @@ define @intrinsic_vaaddu_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vaaddu.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -139,6 +145,7 @@ entry: declare @llvm.riscv.vaaddu.nxv8i8.nxv8i8( , , + , i32); define @intrinsic_vaaddu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { @@ -149,6 +156,7 @@ define @intrinsic_vaaddu_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vaaddu.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -184,6 +192,7 @@ entry: declare @llvm.riscv.vaaddu.nxv16i8.nxv16i8( , , + , i32); define @intrinsic_vaaddu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { @@ -194,6 +203,7 @@ define @intrinsic_vaaddu_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vaaddu.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -229,6 +239,7 @@ entry: declare @llvm.riscv.vaaddu.nxv32i8.nxv32i8( , , + , i32); define @intrinsic_vaaddu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { @@ -239,6 +250,7 @@ define @intrinsic_vaaddu_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vaaddu.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -274,6 +286,7 @@ entry: declare @llvm.riscv.vaaddu.nxv64i8.nxv64i8( , , + , i32); define @intrinsic_vaaddu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { @@ -284,6 +297,7 @@ define @intrinsic_vaaddu_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vaaddu.nxv64i8.nxv64i8( + undef, %0, %1, i32 %2) @@ -320,6 +334,7 @@ entry: declare @llvm.riscv.vaaddu.nxv1i16.nxv1i16( , , + , i32); define @intrinsic_vaaddu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { @@ -330,6 +345,7 @@ define @intrinsic_vaaddu_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vaaddu.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -365,6 +381,7 @@ entry: declare @llvm.riscv.vaaddu.nxv2i16.nxv2i16( , , + , i32); define @intrinsic_vaaddu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { @@ -375,6 +392,7 @@ define @intrinsic_vaaddu_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vaaddu.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -410,6 +428,7 @@ entry: declare @llvm.riscv.vaaddu.nxv4i16.nxv4i16( , , + , i32); define @intrinsic_vaaddu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { @@ -420,6 +439,7 @@ define @intrinsic_vaaddu_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vaaddu.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -455,6 +475,7 @@ entry: declare @llvm.riscv.vaaddu.nxv8i16.nxv8i16( , , + , i32); define @intrinsic_vaaddu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { @@ -465,6 +486,7 @@ define @intrinsic_vaaddu_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vaaddu.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vaaddu.nxv16i16.nxv16i16( , , + , i32); define @intrinsic_vaaddu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { @@ -510,6 +533,7 @@ define @intrinsic_vaaddu_vv_nxv16i16_nxv16i16_nxv16i16( 
@llvm.riscv.vaaddu.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vaaddu.nxv32i16.nxv32i16( , , + , i32); define @intrinsic_vaaddu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { @@ -555,6 +580,7 @@ define @intrinsic_vaaddu_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vaaddu.nxv32i16.nxv32i16( + undef, %0, %1, i32 %2) @@ -591,6 +617,7 @@ entry: declare @llvm.riscv.vaaddu.nxv1i32.nxv1i32( , , + , i32); define @intrinsic_vaaddu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { @@ -601,6 +628,7 @@ define @intrinsic_vaaddu_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vaaddu.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -636,6 +664,7 @@ entry: declare @llvm.riscv.vaaddu.nxv2i32.nxv2i32( , , + , i32); define @intrinsic_vaaddu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { @@ -646,6 +675,7 @@ define @intrinsic_vaaddu_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vaaddu.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vaaddu.nxv4i32.nxv4i32( , , + , i32); define @intrinsic_vaaddu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { @@ -691,6 +722,7 @@ define @intrinsic_vaaddu_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vaaddu.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vaaddu.nxv8i32.nxv8i32( , , + , i32); define @intrinsic_vaaddu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { @@ -736,6 +769,7 @@ define @intrinsic_vaaddu_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vaaddu.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vaaddu.nxv16i32.nxv16i32( , , + , i32); define @intrinsic_vaaddu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { @@ -781,6 +816,7 @@ define @intrinsic_vaaddu_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vaaddu.nxv16i32.nxv16i32( + undef, %0, %1, i32 %2) @@ -817,6 +853,7 @@ entry: declare @llvm.riscv.vaaddu.nxv1i64.nxv1i64( , , + , i32); define @intrinsic_vaaddu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { @@ -827,6 +864,7 @@ define @intrinsic_vaaddu_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vaaddu.nxv1i64.nxv1i64( + undef, %0, %1, i32 %2) @@ -862,6 +900,7 @@ entry: declare @llvm.riscv.vaaddu.nxv2i64.nxv2i64( , , + , i32); define @intrinsic_vaaddu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { @@ -872,6 +911,7 @@ define @intrinsic_vaaddu_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vaaddu.nxv2i64.nxv2i64( + undef, %0, %1, i32 %2) @@ -907,6 +947,7 @@ entry: declare @llvm.riscv.vaaddu.nxv4i64.nxv4i64( , , + , i32); define @intrinsic_vaaddu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { @@ -917,6 +958,7 @@ define @intrinsic_vaaddu_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vaaddu.nxv4i64.nxv4i64( + undef, %0, %1, i32 %2) @@ -952,6 +994,7 @@ entry: declare @llvm.riscv.vaaddu.nxv8i64.nxv8i64( , , + , i32); define @intrinsic_vaaddu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { @@ -962,6 +1005,7 @@ define @intrinsic_vaaddu_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vaaddu.nxv8i64.nxv8i64( + undef, %0, %1, i32 %2) @@ -997,6 +1041,7 @@ entry: declare @llvm.riscv.vaaddu.nxv1i8.i8( , + , i8, i32); @@ -1008,6 +1053,7 @@ define @intrinsic_vaaddu_vx_nxv1i8_nxv1i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaaddu.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1042,6 +1088,7 @@ entry: declare @llvm.riscv.vaaddu.nxv2i8.i8( , + , i8, i32); @@ -1053,6 +1100,7 @@ define @intrinsic_vaaddu_vx_nxv2i8_nxv2i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaaddu.nxv2i8.i8( + undef, %0, i8 %1, 
i32 %2) @@ -1087,6 +1135,7 @@ entry: declare @llvm.riscv.vaaddu.nxv4i8.i8( , + , i8, i32); @@ -1098,6 +1147,7 @@ define @intrinsic_vaaddu_vx_nxv4i8_nxv4i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaaddu.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1132,6 +1182,7 @@ entry: declare @llvm.riscv.vaaddu.nxv8i8.i8( , + , i8, i32); @@ -1143,6 +1194,7 @@ define @intrinsic_vaaddu_vx_nxv8i8_nxv8i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaaddu.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1177,6 +1229,7 @@ entry: declare @llvm.riscv.vaaddu.nxv16i8.i8( , + , i8, i32); @@ -1188,6 +1241,7 @@ define @intrinsic_vaaddu_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vaaddu.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1222,6 +1276,7 @@ entry: declare @llvm.riscv.vaaddu.nxv32i8.i8( , + , i8, i32); @@ -1233,6 +1288,7 @@ define @intrinsic_vaaddu_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vaaddu.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1267,6 +1323,7 @@ entry: declare @llvm.riscv.vaaddu.nxv64i8.i8( , + , i8, i32); @@ -1278,6 +1335,7 @@ define @intrinsic_vaaddu_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vaaddu.nxv64i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1312,6 +1370,7 @@ entry: declare @llvm.riscv.vaaddu.nxv1i16.i16( , + , i16, i32); @@ -1323,6 +1382,7 @@ define @intrinsic_vaaddu_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vaaddu.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1357,6 +1417,7 @@ entry: declare @llvm.riscv.vaaddu.nxv2i16.i16( , + , i16, i32); @@ -1368,6 +1429,7 @@ define @intrinsic_vaaddu_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vaaddu.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1402,6 +1464,7 @@ entry: declare @llvm.riscv.vaaddu.nxv4i16.i16( , + , i16, i32); @@ -1413,6 +1476,7 @@ define @intrinsic_vaaddu_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vaaddu.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1447,6 +1511,7 @@ entry: declare @llvm.riscv.vaaddu.nxv8i16.i16( , + , i16, i32); @@ -1458,6 +1523,7 @@ define @intrinsic_vaaddu_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vaaddu.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1492,6 +1558,7 @@ entry: declare @llvm.riscv.vaaddu.nxv16i16.i16( , + , i16, i32); @@ -1503,6 +1570,7 @@ define @intrinsic_vaaddu_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vaaddu.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1537,6 +1605,7 @@ entry: declare @llvm.riscv.vaaddu.nxv32i16.i16( , + , i16, i32); @@ -1548,6 +1617,7 @@ define @intrinsic_vaaddu_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vaaddu.nxv32i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1582,6 +1652,7 @@ entry: declare @llvm.riscv.vaaddu.nxv1i32.i32( , + , i32, i32); @@ -1593,6 +1664,7 @@ define @intrinsic_vaaddu_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vaaddu.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1627,6 +1699,7 @@ entry: declare @llvm.riscv.vaaddu.nxv2i32.i32( , + , i32, i32); @@ -1638,6 +1711,7 @@ define @intrinsic_vaaddu_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vaaddu.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1672,6 +1746,7 @@ entry: declare @llvm.riscv.vaaddu.nxv4i32.i32( , + , i32, i32); @@ -1683,6 +1758,7 @@ define @intrinsic_vaaddu_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vaaddu.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1717,6 +1793,7 @@ entry: declare @llvm.riscv.vaaddu.nxv8i32.i32( , + , i32, i32); @@ -1728,6 +1805,7 @@ define @intrinsic_vaaddu_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vaaddu.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1762,6 +1840,7 @@ entry: declare @llvm.riscv.vaaddu.nxv16i32.i32( , + , i32, i32); @@ -1773,6 +1852,7 @@ define @intrinsic_vaaddu_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vaaddu.nxv16i32.i32( + undef, %0, i32 %1, i32 %2) @@ 
-1807,6 +1887,7 @@ entry: declare @llvm.riscv.vaaddu.nxv1i64.i64( , + , i64, i32); @@ -1824,6 +1905,7 @@ define @intrinsic_vaaddu_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vaaddu.nxv1i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1864,6 +1946,7 @@ entry: declare @llvm.riscv.vaaddu.nxv2i64.i64( , + , i64, i32); @@ -1881,6 +1964,7 @@ define @intrinsic_vaaddu_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vaaddu.nxv2i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1921,6 +2005,7 @@ entry: declare @llvm.riscv.vaaddu.nxv4i64.i64( , + , i64, i32); @@ -1938,6 +2023,7 @@ define @intrinsic_vaaddu_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vaaddu.nxv4i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1978,6 +2064,7 @@ entry: declare @llvm.riscv.vaaddu.nxv8i64.i64( , + , i64, i32); @@ -1995,6 +2082,7 @@ define @intrinsic_vaaddu_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vaaddu.nxv8i64.i64( + undef, %0, i64 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv64.ll index 3f9ed34..4fad38c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv64.ll @@ -4,6 +4,7 @@ declare @llvm.riscv.vaaddu.nxv1i8.nxv1i8( , , + , i64); define @intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { @@ -14,6 +15,7 @@ define @intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vaaddu.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -49,6 +51,7 @@ entry: declare @llvm.riscv.vaaddu.nxv2i8.nxv2i8( , , + , i64); define @intrinsic_vaaddu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { @@ -59,6 +62,7 @@ define @intrinsic_vaaddu_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vaaddu.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -94,6 +98,7 @@ entry: declare @llvm.riscv.vaaddu.nxv4i8.nxv4i8( , , + , i64); define @intrinsic_vaaddu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { @@ -104,6 +109,7 @@ define @intrinsic_vaaddu_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vaaddu.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -139,6 +145,7 @@ entry: declare @llvm.riscv.vaaddu.nxv8i8.nxv8i8( , , + , i64); define @intrinsic_vaaddu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { @@ -149,6 +156,7 @@ define @intrinsic_vaaddu_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vaaddu.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -184,6 +192,7 @@ entry: declare @llvm.riscv.vaaddu.nxv16i8.nxv16i8( , , + , i64); define @intrinsic_vaaddu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { @@ -194,6 +203,7 @@ define @intrinsic_vaaddu_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vaaddu.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -229,6 +239,7 @@ entry: declare @llvm.riscv.vaaddu.nxv32i8.nxv32i8( , , + , i64); define @intrinsic_vaaddu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { @@ -239,6 +250,7 @@ define @intrinsic_vaaddu_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vaaddu.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -274,6 +286,7 @@ entry: declare @llvm.riscv.vaaddu.nxv64i8.nxv64i8( , , + , i64); define @intrinsic_vaaddu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { @@ -284,6 +297,7 @@ define @intrinsic_vaaddu_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vaaddu.nxv64i8.nxv64i8( + undef, %0, %1, i64 %2) @@ -320,6 +334,7 @@ entry: declare @llvm.riscv.vaaddu.nxv1i16.nxv1i16( , , + , i64); define @intrinsic_vaaddu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { @@ -330,6 +345,7 @@ define @intrinsic_vaaddu_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vaaddu.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -365,6 +381,7 @@ entry: declare @llvm.riscv.vaaddu.nxv2i16.nxv2i16( , , + , i64); define 
@intrinsic_vaaddu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { @@ -375,6 +392,7 @@ define @intrinsic_vaaddu_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vaaddu.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -410,6 +428,7 @@ entry: declare @llvm.riscv.vaaddu.nxv4i16.nxv4i16( , , + , i64); define @intrinsic_vaaddu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { @@ -420,6 +439,7 @@ define @intrinsic_vaaddu_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vaaddu.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -455,6 +475,7 @@ entry: declare @llvm.riscv.vaaddu.nxv8i16.nxv8i16( , , + , i64); define @intrinsic_vaaddu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { @@ -465,6 +486,7 @@ define @intrinsic_vaaddu_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vaaddu.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vaaddu.nxv16i16.nxv16i16( , , + , i64); define @intrinsic_vaaddu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { @@ -510,6 +533,7 @@ define @intrinsic_vaaddu_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vaaddu.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vaaddu.nxv32i16.nxv32i16( , , + , i64); define @intrinsic_vaaddu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { @@ -555,6 +580,7 @@ define @intrinsic_vaaddu_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vaaddu.nxv32i16.nxv32i16( + undef, %0, %1, i64 %2) @@ -591,6 +617,7 @@ entry: declare @llvm.riscv.vaaddu.nxv1i32.nxv1i32( , , + , i64); define @intrinsic_vaaddu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { @@ -601,6 +628,7 @@ define @intrinsic_vaaddu_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vaaddu.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -636,6 +664,7 @@ entry: declare @llvm.riscv.vaaddu.nxv2i32.nxv2i32( , , + , i64); define @intrinsic_vaaddu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { @@ -646,6 +675,7 @@ define @intrinsic_vaaddu_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vaaddu.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vaaddu.nxv4i32.nxv4i32( , , + , i64); define @intrinsic_vaaddu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { @@ -691,6 +722,7 @@ define @intrinsic_vaaddu_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vaaddu.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vaaddu.nxv8i32.nxv8i32( , , + , i64); define @intrinsic_vaaddu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { @@ -736,6 +769,7 @@ define @intrinsic_vaaddu_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vaaddu.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vaaddu.nxv16i32.nxv16i32( , , + , i64); define @intrinsic_vaaddu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { @@ -781,6 +816,7 @@ define @intrinsic_vaaddu_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vaaddu.nxv16i32.nxv16i32( + undef, %0, %1, i64 %2) @@ -817,6 +853,7 @@ entry: declare @llvm.riscv.vaaddu.nxv1i64.nxv1i64( , , + , i64); define @intrinsic_vaaddu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { @@ -827,6 +864,7 @@ define @intrinsic_vaaddu_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vaaddu.nxv1i64.nxv1i64( + undef, %0, %1, i64 %2) @@ -862,6 +900,7 @@ entry: declare @llvm.riscv.vaaddu.nxv2i64.nxv2i64( , , + , i64); define @intrinsic_vaaddu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { @@ -872,6 +911,7 @@ define @intrinsic_vaaddu_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vaaddu.nxv2i64.nxv2i64( + undef, %0, %1, i64 %2) @@ -907,6 +947,7 @@ entry: 
declare @llvm.riscv.vaaddu.nxv4i64.nxv4i64( , , + , i64); define @intrinsic_vaaddu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { @@ -917,6 +958,7 @@ define @intrinsic_vaaddu_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vaaddu.nxv4i64.nxv4i64( + undef, %0, %1, i64 %2) @@ -952,6 +994,7 @@ entry: declare @llvm.riscv.vaaddu.nxv8i64.nxv8i64( , , + , i64); define @intrinsic_vaaddu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { @@ -962,6 +1005,7 @@ define @intrinsic_vaaddu_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vaaddu.nxv8i64.nxv8i64( + undef, %0, %1, i64 %2) @@ -997,6 +1041,7 @@ entry: declare @llvm.riscv.vaaddu.nxv1i8.i8( , + , i8, i64); @@ -1008,6 +1053,7 @@ define @intrinsic_vaaddu_vx_nxv1i8_nxv1i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaaddu.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1042,6 +1088,7 @@ entry: declare @llvm.riscv.vaaddu.nxv2i8.i8( , + , i8, i64); @@ -1053,6 +1100,7 @@ define @intrinsic_vaaddu_vx_nxv2i8_nxv2i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaaddu.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1087,6 +1135,7 @@ entry: declare @llvm.riscv.vaaddu.nxv4i8.i8( , + , i8, i64); @@ -1098,6 +1147,7 @@ define @intrinsic_vaaddu_vx_nxv4i8_nxv4i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaaddu.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1132,6 +1182,7 @@ entry: declare @llvm.riscv.vaaddu.nxv8i8.i8( , + , i8, i64); @@ -1143,6 +1194,7 @@ define @intrinsic_vaaddu_vx_nxv8i8_nxv8i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaaddu.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1177,6 +1229,7 @@ entry: declare @llvm.riscv.vaaddu.nxv16i8.i8( , + , i8, i64); @@ -1188,6 +1241,7 @@ define @intrinsic_vaaddu_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vaaddu.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1222,6 +1276,7 @@ entry: declare @llvm.riscv.vaaddu.nxv32i8.i8( , + , i8, i64); @@ -1233,6 +1288,7 @@ define @intrinsic_vaaddu_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vaaddu.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1267,6 +1323,7 @@ entry: declare @llvm.riscv.vaaddu.nxv64i8.i8( , + , i8, i64); @@ -1278,6 +1335,7 @@ define @intrinsic_vaaddu_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vaaddu.nxv64i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1312,6 +1370,7 @@ entry: declare @llvm.riscv.vaaddu.nxv1i16.i16( , + , i16, i64); @@ -1323,6 +1382,7 @@ define @intrinsic_vaaddu_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vaaddu.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1357,6 +1417,7 @@ entry: declare @llvm.riscv.vaaddu.nxv2i16.i16( , + , i16, i64); @@ -1368,6 +1429,7 @@ define @intrinsic_vaaddu_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vaaddu.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1402,6 +1464,7 @@ entry: declare @llvm.riscv.vaaddu.nxv4i16.i16( , + , i16, i64); @@ -1413,6 +1476,7 @@ define @intrinsic_vaaddu_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vaaddu.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1447,6 +1511,7 @@ entry: declare @llvm.riscv.vaaddu.nxv8i16.i16( , + , i16, i64); @@ -1458,6 +1523,7 @@ define @intrinsic_vaaddu_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vaaddu.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1492,6 +1558,7 @@ entry: declare @llvm.riscv.vaaddu.nxv16i16.i16( , + , i16, i64); @@ -1503,6 +1570,7 @@ define @intrinsic_vaaddu_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vaaddu.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1537,6 +1605,7 @@ entry: declare @llvm.riscv.vaaddu.nxv32i16.i16( , + , i16, i64); @@ -1548,6 +1617,7 @@ define @intrinsic_vaaddu_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vaaddu.nxv32i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1582,6 +1652,7 @@ entry: declare 
@llvm.riscv.vaaddu.nxv1i32.i32( , + , i32, i64); @@ -1593,6 +1664,7 @@ define @intrinsic_vaaddu_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vaaddu.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1627,6 +1699,7 @@ entry: declare @llvm.riscv.vaaddu.nxv2i32.i32( , + , i32, i64); @@ -1638,6 +1711,7 @@ define @intrinsic_vaaddu_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vaaddu.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1672,6 +1746,7 @@ entry: declare @llvm.riscv.vaaddu.nxv4i32.i32( , + , i32, i64); @@ -1683,6 +1758,7 @@ define @intrinsic_vaaddu_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vaaddu.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1717,6 +1793,7 @@ entry: declare @llvm.riscv.vaaddu.nxv8i32.i32( , + , i32, i64); @@ -1728,6 +1805,7 @@ define @intrinsic_vaaddu_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vaaddu.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1762,6 +1840,7 @@ entry: declare @llvm.riscv.vaaddu.nxv16i32.i32( , + , i32, i64); @@ -1773,6 +1852,7 @@ define @intrinsic_vaaddu_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vaaddu.nxv16i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1807,6 +1887,7 @@ entry: declare @llvm.riscv.vaaddu.nxv1i64.i64( , + , i64, i64); @@ -1818,6 +1899,7 @@ define @intrinsic_vaaddu_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vaaddu.nxv1i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1852,6 +1934,7 @@ entry: declare @llvm.riscv.vaaddu.nxv2i64.i64( , + , i64, i64); @@ -1863,6 +1946,7 @@ define @intrinsic_vaaddu_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vaaddu.nxv2i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1897,6 +1981,7 @@ entry: declare @llvm.riscv.vaaddu.nxv4i64.i64( , + , i64, i64); @@ -1908,6 +1993,7 @@ define @intrinsic_vaaddu_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vaaddu.nxv4i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1942,6 +2028,7 @@ entry: declare @llvm.riscv.vaaddu.nxv8i64.i64( , + , i64, i64); @@ -1953,6 +2040,7 @@ define @intrinsic_vaaddu_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vaaddu.nxv8i64.i64( + undef, %0, i64 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-policy.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-policy.ll index 57db517..4436a39 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vadd-policy.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vadd-policy.ll @@ -5,6 +5,7 @@ declare @llvm.riscv.vadd.nxv8i8.nxv8i8( , , + , i64); define @intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { @@ -15,6 +16,7 @@ define @intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vadd.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll index 4e7b127..5f39548 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll @@ -4,6 +4,7 @@ declare @llvm.riscv.vadd.nxv1i8.nxv1i8( , , + , i32); define @intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { @@ -14,6 +15,7 @@ define @intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vadd.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -48,6 +50,7 @@ entry: declare @llvm.riscv.vadd.nxv2i8.nxv2i8( , , + , i32); define @intrinsic_vadd_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { @@ -58,6 +61,7 @@ define @intrinsic_vadd_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vadd.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -92,6 +96,7 @@ entry: declare @llvm.riscv.vadd.nxv4i8.nxv4i8( , , + , i32); define @intrinsic_vadd_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { @@ -102,6 +107,7 @@ define @intrinsic_vadd_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vadd.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -136,6 +142,7 @@ entry: declare @llvm.riscv.vadd.nxv8i8.nxv8i8( , , + , i32); 
 define <vscale x 8 x i8> @intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
@@ -146,6 +153,7 @@ define <vscale x 8 x i8> @intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> undef,
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     i32 %2)
@@ -180,6 +188,7 @@ entry:
 declare <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
   i32);

[... analogous declare/call hunks for the remaining vadd.vv test functions, nxv16i8 through nxv8i64 ...]

@@ -975,6 +1019,7 @@ entry:
 declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8(
   <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
   i8,
   i32);
@@ -986,6 +1031,7 @@ define <vscale x 1 x i8> @intrinsic_vadd_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i8 %1,
     i32 %2)

[... analogous hunks for the remaining vadd.vx test functions, nxv2i8 through nxv8i64 ...]

@@ -1997,6 +2085,7 @@ define <vscale x 1 x i8> @intrinsic_vadd_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i8 9,
     i32 %1)

[... analogous hunks for the remaining vadd.vi test functions, nxv2i8 through nxv8i64 ...]

diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll
index a56bf1b..5f1bd68 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll
@@ -4,6 +4,7 @@
 declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
   i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i64 %2)

[... the same passthru-operand change repeated for every vadd.vv, vadd.vx, and vadd.vi test function in the rv64 file, with i64 VL ...]

diff --git a/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll
index 89b9dbc..8504108 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll
@@ -4,6 +4,7 @@
 declare <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
   i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 %2)

[... the same passthru-operand change repeated for every vand.vv, vand.vx, and vand.vi test function in this file ...]

diff --git a/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll
index 39a1d9b..2dfd333 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll
@@ -4,6 +4,7 @@
 declare <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
   i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i64 %2)

[... the same passthru-operand change repeated for every vand.vv, vand.vx, and vand.vi test function in the rv64 file ...]

diff --git a/llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll
index d1cfc97..fb1f892 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll
@@ -4,6 +4,7 @@
 declare <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
   i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 %2)

[... the same passthru-operand change repeated for every vasub.vv and vasub.vx test function in this file ...]

diff --git a/llvm/test/CodeGen/RISCV/rvv/vasub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vasub-rv64.ll
index 6ad70da..595c05b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vasub-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vasub-rv64.ll
@@ -4,6 +4,7 @@
 declare <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
   i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i64 %2)

[... analogous vasub.vv hunks for nxv2i8 through nxv8i16 ...]

@@ -500,6 +522,7 @@ entry:
 declare
@llvm.riscv.vasub.nxv16i16.nxv16i16( , , + , i64); define @intrinsic_vasub_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { @@ -510,6 +533,7 @@ define @intrinsic_vasub_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vasub.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vasub.nxv32i16.nxv32i16( , , + , i64); define @intrinsic_vasub_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { @@ -555,6 +580,7 @@ define @intrinsic_vasub_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vasub.nxv32i16.nxv32i16( + undef, %0, %1, i64 %2) @@ -591,6 +617,7 @@ entry: declare @llvm.riscv.vasub.nxv1i32.nxv1i32( , , + , i64); define @intrinsic_vasub_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { @@ -601,6 +628,7 @@ define @intrinsic_vasub_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vasub.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -636,6 +664,7 @@ entry: declare @llvm.riscv.vasub.nxv2i32.nxv2i32( , , + , i64); define @intrinsic_vasub_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { @@ -646,6 +675,7 @@ define @intrinsic_vasub_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vasub.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vasub.nxv4i32.nxv4i32( , , + , i64); define @intrinsic_vasub_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { @@ -691,6 +722,7 @@ define @intrinsic_vasub_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vasub.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vasub.nxv8i32.nxv8i32( , , + , i64); define @intrinsic_vasub_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { @@ -736,6 +769,7 @@ define @intrinsic_vasub_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vasub.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vasub.nxv16i32.nxv16i32( , , + , i64); define @intrinsic_vasub_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { @@ -781,6 +816,7 @@ define @intrinsic_vasub_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vasub.nxv16i32.nxv16i32( + undef, %0, %1, i64 %2) @@ -817,6 +853,7 @@ entry: declare @llvm.riscv.vasub.nxv1i64.nxv1i64( , , + , i64); define @intrinsic_vasub_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { @@ -827,6 +864,7 @@ define @intrinsic_vasub_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vasub.nxv1i64.nxv1i64( + undef, %0, %1, i64 %2) @@ -862,6 +900,7 @@ entry: declare @llvm.riscv.vasub.nxv2i64.nxv2i64( , , + , i64); define @intrinsic_vasub_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { @@ -872,6 +911,7 @@ define @intrinsic_vasub_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vasub.nxv2i64.nxv2i64( + undef, %0, %1, i64 %2) @@ -907,6 +947,7 @@ entry: declare @llvm.riscv.vasub.nxv4i64.nxv4i64( , , + , i64); define @intrinsic_vasub_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { @@ -917,6 +958,7 @@ define @intrinsic_vasub_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vasub.nxv4i64.nxv4i64( + undef, %0, %1, i64 %2) @@ -952,6 +994,7 @@ entry: declare @llvm.riscv.vasub.nxv8i64.nxv8i64( , , + , i64); define @intrinsic_vasub_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { @@ -962,6 +1005,7 @@ define @intrinsic_vasub_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vasub.nxv8i64.nxv8i64( + undef, %0, %1, i64 %2) @@ -997,6 +1041,7 @@ entry: declare @llvm.riscv.vasub.nxv1i8.i8( , + , i8, i64); @@ -1008,6 +1053,7 @@ define @intrinsic_vasub_vx_nxv1i8_nxv1i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vasub.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1042,6 +1088,7 @@ entry: declare @llvm.riscv.vasub.nxv2i8.i8( , + , 
i8, i64); @@ -1053,6 +1100,7 @@ define @intrinsic_vasub_vx_nxv2i8_nxv2i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vasub.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1087,6 +1135,7 @@ entry: declare @llvm.riscv.vasub.nxv4i8.i8( , + , i8, i64); @@ -1098,6 +1147,7 @@ define @intrinsic_vasub_vx_nxv4i8_nxv4i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vasub.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1132,6 +1182,7 @@ entry: declare @llvm.riscv.vasub.nxv8i8.i8( , + , i8, i64); @@ -1143,6 +1194,7 @@ define @intrinsic_vasub_vx_nxv8i8_nxv8i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vasub.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1177,6 +1229,7 @@ entry: declare @llvm.riscv.vasub.nxv16i8.i8( , + , i8, i64); @@ -1188,6 +1241,7 @@ define @intrinsic_vasub_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vasub.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1222,6 +1276,7 @@ entry: declare @llvm.riscv.vasub.nxv32i8.i8( , + , i8, i64); @@ -1233,6 +1288,7 @@ define @intrinsic_vasub_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vasub.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1267,6 +1323,7 @@ entry: declare @llvm.riscv.vasub.nxv64i8.i8( , + , i8, i64); @@ -1278,6 +1335,7 @@ define @intrinsic_vasub_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vasub.nxv64i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1312,6 +1370,7 @@ entry: declare @llvm.riscv.vasub.nxv1i16.i16( , + , i16, i64); @@ -1323,6 +1382,7 @@ define @intrinsic_vasub_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vasub.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1357,6 +1417,7 @@ entry: declare @llvm.riscv.vasub.nxv2i16.i16( , + , i16, i64); @@ -1368,6 +1429,7 @@ define @intrinsic_vasub_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vasub.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1402,6 +1464,7 @@ entry: declare @llvm.riscv.vasub.nxv4i16.i16( , + , i16, i64); @@ -1413,6 +1476,7 @@ define @intrinsic_vasub_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vasub.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1447,6 +1511,7 @@ entry: declare @llvm.riscv.vasub.nxv8i16.i16( , + , i16, i64); @@ -1458,6 +1523,7 @@ define @intrinsic_vasub_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vasub.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1492,6 +1558,7 @@ entry: declare @llvm.riscv.vasub.nxv16i16.i16( , + , i16, i64); @@ -1503,6 +1570,7 @@ define @intrinsic_vasub_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vasub.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1537,6 +1605,7 @@ entry: declare @llvm.riscv.vasub.nxv32i16.i16( , + , i16, i64); @@ -1548,6 +1617,7 @@ define @intrinsic_vasub_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vasub.nxv32i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1582,6 +1652,7 @@ entry: declare @llvm.riscv.vasub.nxv1i32.i32( , + , i32, i64); @@ -1593,6 +1664,7 @@ define @intrinsic_vasub_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vasub.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1627,6 +1699,7 @@ entry: declare @llvm.riscv.vasub.nxv2i32.i32( , + , i32, i64); @@ -1638,6 +1711,7 @@ define @intrinsic_vasub_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vasub.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1672,6 +1746,7 @@ entry: declare @llvm.riscv.vasub.nxv4i32.i32( , + , i32, i64); @@ -1683,6 +1758,7 @@ define @intrinsic_vasub_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vasub.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1717,6 +1793,7 @@ entry: declare @llvm.riscv.vasub.nxv8i32.i32( , + , i32, i64); @@ -1728,6 +1805,7 @@ define @intrinsic_vasub_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vasub.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1762,6 +1840,7 @@ entry: declare @llvm.riscv.vasub.nxv16i32.i32( , + , i32, i64); @@ -1773,6 +1852,7 @@ define 
diff --git a/llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll
index 4707aff..1845da2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll
@@ -4,6 +4,7 @@
 declare <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
   i32);
 
 define <vscale x 1 x i8> @intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8(
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 %2)
[All remaining vasubu .vv and .vx test cases in this file, including the i64-scalar variants on rv32, receive the same passthru parameter in the declare and undef first operand at the call.]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vasubu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vasubu-rv64.ll
index 74d4690..7b9824e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vasubu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vasubu-rv64.ll
@@ -4,6 +4,7 @@
 declare <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
   i64);
 
 define <vscale x 1 x i8> @intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8(
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i64 %2)
[All remaining vasubu .vv and .vx test cases in this file (VL type i64) are updated the same way.]
-1357,6 +1417,7 @@ entry: declare @llvm.riscv.vasubu.nxv2i16.i16( , + , i16, i64); @@ -1368,6 +1429,7 @@ define @intrinsic_vasubu_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vasubu.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1402,6 +1464,7 @@ entry: declare @llvm.riscv.vasubu.nxv4i16.i16( , + , i16, i64); @@ -1413,6 +1476,7 @@ define @intrinsic_vasubu_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vasubu.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1447,6 +1511,7 @@ entry: declare @llvm.riscv.vasubu.nxv8i16.i16( , + , i16, i64); @@ -1458,6 +1523,7 @@ define @intrinsic_vasubu_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vasubu.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1492,6 +1558,7 @@ entry: declare @llvm.riscv.vasubu.nxv16i16.i16( , + , i16, i64); @@ -1503,6 +1570,7 @@ define @intrinsic_vasubu_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vasubu.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1537,6 +1605,7 @@ entry: declare @llvm.riscv.vasubu.nxv32i16.i16( , + , i16, i64); @@ -1548,6 +1617,7 @@ define @intrinsic_vasubu_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vasubu.nxv32i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1582,6 +1652,7 @@ entry: declare @llvm.riscv.vasubu.nxv1i32.i32( , + , i32, i64); @@ -1593,6 +1664,7 @@ define @intrinsic_vasubu_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vasubu.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1627,6 +1699,7 @@ entry: declare @llvm.riscv.vasubu.nxv2i32.i32( , + , i32, i64); @@ -1638,6 +1711,7 @@ define @intrinsic_vasubu_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vasubu.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1672,6 +1746,7 @@ entry: declare @llvm.riscv.vasubu.nxv4i32.i32( , + , i32, i64); @@ -1683,6 +1758,7 @@ define @intrinsic_vasubu_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vasubu.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1717,6 +1793,7 @@ entry: declare @llvm.riscv.vasubu.nxv8i32.i32( , + , i32, i64); @@ -1728,6 +1805,7 @@ define @intrinsic_vasubu_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vasubu.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1762,6 +1840,7 @@ entry: declare @llvm.riscv.vasubu.nxv16i32.i32( , + , i32, i64); @@ -1773,6 +1852,7 @@ define @intrinsic_vasubu_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vasubu.nxv16i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1807,6 +1887,7 @@ entry: declare @llvm.riscv.vasubu.nxv1i64.i64( , + , i64, i64); @@ -1818,6 +1899,7 @@ define @intrinsic_vasubu_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vasubu.nxv1i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1852,6 +1934,7 @@ entry: declare @llvm.riscv.vasubu.nxv2i64.i64( , + , i64, i64); @@ -1863,6 +1946,7 @@ define @intrinsic_vasubu_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vasubu.nxv2i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1897,6 +1981,7 @@ entry: declare @llvm.riscv.vasubu.nxv4i64.i64( , + , i64, i64); @@ -1908,6 +1993,7 @@ define @intrinsic_vasubu_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vasubu.nxv4i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1942,6 +2028,7 @@ entry: declare @llvm.riscv.vasubu.nxv8i64.i64( , + , i64, i64); @@ -1953,6 +2040,7 @@ define @intrinsic_vasubu_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vasubu.nxv8i64.i64( + undef, %0, i64 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll index e062069..bfc267b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll @@ -4,6 +4,7 @@ declare @llvm.riscv.vdiv.nxv1i8.nxv1i8( , , + , i32); define @intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { @@ -14,6 +15,7 @@ define @intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vdiv.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ 
-49,6 +51,7 @@ entry: declare @llvm.riscv.vdiv.nxv2i8.nxv2i8( , , + , i32); define @intrinsic_vdiv_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { @@ -59,6 +62,7 @@ define @intrinsic_vdiv_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vdiv.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -94,6 +98,7 @@ entry: declare @llvm.riscv.vdiv.nxv4i8.nxv4i8( , , + , i32); define @intrinsic_vdiv_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { @@ -104,6 +109,7 @@ define @intrinsic_vdiv_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vdiv.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -139,6 +145,7 @@ entry: declare @llvm.riscv.vdiv.nxv8i8.nxv8i8( , , + , i32); define @intrinsic_vdiv_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { @@ -149,6 +156,7 @@ define @intrinsic_vdiv_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vdiv.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -184,6 +192,7 @@ entry: declare @llvm.riscv.vdiv.nxv16i8.nxv16i8( , , + , i32); define @intrinsic_vdiv_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { @@ -194,6 +203,7 @@ define @intrinsic_vdiv_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vdiv.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -229,6 +239,7 @@ entry: declare @llvm.riscv.vdiv.nxv32i8.nxv32i8( , , + , i32); define @intrinsic_vdiv_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { @@ -239,6 +250,7 @@ define @intrinsic_vdiv_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vdiv.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -274,6 +286,7 @@ entry: declare @llvm.riscv.vdiv.nxv64i8.nxv64i8( , , + , i32); define @intrinsic_vdiv_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { @@ -284,6 +297,7 @@ define @intrinsic_vdiv_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vdiv.nxv64i8.nxv64i8( + undef, %0, %1, i32 %2) @@ -320,6 +334,7 @@ entry: declare @llvm.riscv.vdiv.nxv1i16.nxv1i16( , , + , i32); define @intrinsic_vdiv_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { @@ -330,6 +345,7 @@ define @intrinsic_vdiv_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vdiv.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -365,6 +381,7 @@ entry: declare @llvm.riscv.vdiv.nxv2i16.nxv2i16( , , + , i32); define @intrinsic_vdiv_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { @@ -375,6 +392,7 @@ define @intrinsic_vdiv_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vdiv.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -410,6 +428,7 @@ entry: declare @llvm.riscv.vdiv.nxv4i16.nxv4i16( , , + , i32); define @intrinsic_vdiv_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { @@ -420,6 +439,7 @@ define @intrinsic_vdiv_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vdiv.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -455,6 +475,7 @@ entry: declare @llvm.riscv.vdiv.nxv8i16.nxv8i16( , , + , i32); define @intrinsic_vdiv_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { @@ -465,6 +486,7 @@ define @intrinsic_vdiv_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vdiv.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vdiv.nxv16i16.nxv16i16( , , + , i32); define @intrinsic_vdiv_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { @@ -510,6 +533,7 @@ define @intrinsic_vdiv_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vdiv.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vdiv.nxv32i16.nxv32i16( , , + , i32); define @intrinsic_vdiv_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { @@ -555,6 +580,7 @@ define @intrinsic_vdiv_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vdiv.nxv32i16.nxv32i16( + undef, %0, %1, i32 %2) @@ -591,6 +617,7 @@ entry: declare @llvm.riscv.vdiv.nxv1i32.nxv1i32( , , + , 
i32); define @intrinsic_vdiv_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { @@ -601,6 +628,7 @@ define @intrinsic_vdiv_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vdiv.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -636,6 +664,7 @@ entry: declare @llvm.riscv.vdiv.nxv2i32.nxv2i32( , , + , i32); define @intrinsic_vdiv_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { @@ -646,6 +675,7 @@ define @intrinsic_vdiv_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vdiv.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vdiv.nxv4i32.nxv4i32( , , + , i32); define @intrinsic_vdiv_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { @@ -691,6 +722,7 @@ define @intrinsic_vdiv_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vdiv.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vdiv.nxv8i32.nxv8i32( , , + , i32); define @intrinsic_vdiv_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { @@ -736,6 +769,7 @@ define @intrinsic_vdiv_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vdiv.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vdiv.nxv16i32.nxv16i32( , , + , i32); define @intrinsic_vdiv_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { @@ -781,6 +816,7 @@ define @intrinsic_vdiv_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vdiv.nxv16i32.nxv16i32( + undef, %0, %1, i32 %2) @@ -817,6 +853,7 @@ entry: declare @llvm.riscv.vdiv.nxv1i64.nxv1i64( , , + , i32); define @intrinsic_vdiv_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { @@ -827,6 +864,7 @@ define @intrinsic_vdiv_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vdiv.nxv1i64.nxv1i64( + undef, %0, %1, i32 %2) @@ -862,6 +900,7 @@ entry: declare @llvm.riscv.vdiv.nxv2i64.nxv2i64( , , + , i32); define @intrinsic_vdiv_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { @@ -872,6 +911,7 @@ define @intrinsic_vdiv_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vdiv.nxv2i64.nxv2i64( + undef, %0, %1, i32 %2) @@ -907,6 +947,7 @@ entry: declare @llvm.riscv.vdiv.nxv4i64.nxv4i64( , , + , i32); define @intrinsic_vdiv_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { @@ -917,6 +958,7 @@ define @intrinsic_vdiv_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vdiv.nxv4i64.nxv4i64( + undef, %0, %1, i32 %2) @@ -952,6 +994,7 @@ entry: declare @llvm.riscv.vdiv.nxv8i64.nxv8i64( , , + , i32); define @intrinsic_vdiv_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { @@ -962,6 +1005,7 @@ define @intrinsic_vdiv_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vdiv.nxv8i64.nxv8i64( + undef, %0, %1, i32 %2) @@ -997,6 +1041,7 @@ entry: declare @llvm.riscv.vdiv.nxv1i8.i8( , + , i8, i32); @@ -1008,6 +1053,7 @@ define @intrinsic_vdiv_vx_nxv1i8_nxv1i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdiv.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1042,6 +1088,7 @@ entry: declare @llvm.riscv.vdiv.nxv2i8.i8( , + , i8, i32); @@ -1053,6 +1100,7 @@ define @intrinsic_vdiv_vx_nxv2i8_nxv2i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdiv.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1087,6 +1135,7 @@ entry: declare @llvm.riscv.vdiv.nxv4i8.i8( , + , i8, i32); @@ -1098,6 +1147,7 @@ define @intrinsic_vdiv_vx_nxv4i8_nxv4i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdiv.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1132,6 +1182,7 @@ entry: declare @llvm.riscv.vdiv.nxv8i8.i8( , + , i8, i32); @@ -1143,6 +1194,7 @@ define @intrinsic_vdiv_vx_nxv8i8_nxv8i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdiv.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1177,6 +1229,7 @@ entry: declare 
@llvm.riscv.vdiv.nxv16i8.i8( , + , i8, i32); @@ -1188,6 +1241,7 @@ define @intrinsic_vdiv_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vdiv.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1222,6 +1276,7 @@ entry: declare @llvm.riscv.vdiv.nxv32i8.i8( , + , i8, i32); @@ -1233,6 +1288,7 @@ define @intrinsic_vdiv_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vdiv.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1267,6 +1323,7 @@ entry: declare @llvm.riscv.vdiv.nxv64i8.i8( , + , i8, i32); @@ -1278,6 +1335,7 @@ define @intrinsic_vdiv_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vdiv.nxv64i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1312,6 +1370,7 @@ entry: declare @llvm.riscv.vdiv.nxv1i16.i16( , + , i16, i32); @@ -1323,6 +1382,7 @@ define @intrinsic_vdiv_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vdiv.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1357,6 +1417,7 @@ entry: declare @llvm.riscv.vdiv.nxv2i16.i16( , + , i16, i32); @@ -1368,6 +1429,7 @@ define @intrinsic_vdiv_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vdiv.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1402,6 +1464,7 @@ entry: declare @llvm.riscv.vdiv.nxv4i16.i16( , + , i16, i32); @@ -1413,6 +1476,7 @@ define @intrinsic_vdiv_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vdiv.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1447,6 +1511,7 @@ entry: declare @llvm.riscv.vdiv.nxv8i16.i16( , + , i16, i32); @@ -1458,6 +1523,7 @@ define @intrinsic_vdiv_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vdiv.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1492,6 +1558,7 @@ entry: declare @llvm.riscv.vdiv.nxv16i16.i16( , + , i16, i32); @@ -1503,6 +1570,7 @@ define @intrinsic_vdiv_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vdiv.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1537,6 +1605,7 @@ entry: declare @llvm.riscv.vdiv.nxv32i16.i16( , + , i16, i32); @@ -1548,6 +1617,7 @@ define @intrinsic_vdiv_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vdiv.nxv32i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1582,6 +1652,7 @@ entry: declare @llvm.riscv.vdiv.nxv1i32.i32( , + , i32, i32); @@ -1593,6 +1664,7 @@ define @intrinsic_vdiv_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vdiv.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1627,6 +1699,7 @@ entry: declare @llvm.riscv.vdiv.nxv2i32.i32( , + , i32, i32); @@ -1638,6 +1711,7 @@ define @intrinsic_vdiv_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vdiv.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1672,6 +1746,7 @@ entry: declare @llvm.riscv.vdiv.nxv4i32.i32( , + , i32, i32); @@ -1683,6 +1758,7 @@ define @intrinsic_vdiv_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vdiv.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1717,6 +1793,7 @@ entry: declare @llvm.riscv.vdiv.nxv8i32.i32( , + , i32, i32); @@ -1728,6 +1805,7 @@ define @intrinsic_vdiv_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vdiv.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1762,6 +1840,7 @@ entry: declare @llvm.riscv.vdiv.nxv16i32.i32( , + , i32, i32); @@ -1773,6 +1852,7 @@ define @intrinsic_vdiv_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vdiv.nxv16i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1807,6 +1887,7 @@ entry: declare @llvm.riscv.vdiv.nxv1i64.i64( , + , i64, i32); @@ -1824,6 +1905,7 @@ define @intrinsic_vdiv_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vdiv.nxv1i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1864,6 +1946,7 @@ entry: declare @llvm.riscv.vdiv.nxv2i64.i64( , + , i64, i32); @@ -1881,6 +1964,7 @@ define @intrinsic_vdiv_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vdiv.nxv2i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1921,6 +2005,7 @@ entry: declare @llvm.riscv.vdiv.nxv4i64.i64( , + , i64, i32); @@ -1938,6 +2023,7 @@ define @intrinsic_vdiv_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vdiv.nxv4i64.i64( + undef, %0, i64 
%1, i32 %2) @@ -1978,6 +2064,7 @@ entry: declare @llvm.riscv.vdiv.nxv8i64.i64( , + , i64, i32); @@ -1995,6 +2082,7 @@ define @intrinsic_vdiv_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vdiv.nxv8i64.i64( + undef, %0, i64 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll index 01f1100..349052d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll @@ -4,6 +4,7 @@ declare @llvm.riscv.vdiv.nxv1i8.nxv1i8( , , + , i64); define @intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { @@ -14,6 +15,7 @@ define @intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vdiv.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -49,6 +51,7 @@ entry: declare @llvm.riscv.vdiv.nxv2i8.nxv2i8( , , + , i64); define @intrinsic_vdiv_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { @@ -59,6 +62,7 @@ define @intrinsic_vdiv_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vdiv.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -94,6 +98,7 @@ entry: declare @llvm.riscv.vdiv.nxv4i8.nxv4i8( , , + , i64); define @intrinsic_vdiv_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { @@ -104,6 +109,7 @@ define @intrinsic_vdiv_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vdiv.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -139,6 +145,7 @@ entry: declare @llvm.riscv.vdiv.nxv8i8.nxv8i8( , , + , i64); define @intrinsic_vdiv_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { @@ -149,6 +156,7 @@ define @intrinsic_vdiv_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vdiv.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -184,6 +192,7 @@ entry: declare @llvm.riscv.vdiv.nxv16i8.nxv16i8( , , + , i64); define @intrinsic_vdiv_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { @@ -194,6 +203,7 @@ define @intrinsic_vdiv_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vdiv.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -229,6 +239,7 @@ entry: declare @llvm.riscv.vdiv.nxv32i8.nxv32i8( , , + , i64); define @intrinsic_vdiv_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { @@ -239,6 +250,7 @@ define @intrinsic_vdiv_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vdiv.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -274,6 +286,7 @@ entry: declare @llvm.riscv.vdiv.nxv64i8.nxv64i8( , , + , i64); define @intrinsic_vdiv_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { @@ -284,6 +297,7 @@ define @intrinsic_vdiv_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vdiv.nxv64i8.nxv64i8( + undef, %0, %1, i64 %2) @@ -320,6 +334,7 @@ entry: declare @llvm.riscv.vdiv.nxv1i16.nxv1i16( , , + , i64); define @intrinsic_vdiv_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { @@ -330,6 +345,7 @@ define @intrinsic_vdiv_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vdiv.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -365,6 +381,7 @@ entry: declare @llvm.riscv.vdiv.nxv2i16.nxv2i16( , , + , i64); define @intrinsic_vdiv_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { @@ -375,6 +392,7 @@ define @intrinsic_vdiv_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vdiv.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -410,6 +428,7 @@ entry: declare @llvm.riscv.vdiv.nxv4i16.nxv4i16( , , + , i64); define @intrinsic_vdiv_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { @@ -420,6 +439,7 @@ define @intrinsic_vdiv_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vdiv.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -455,6 +475,7 @@ entry: declare @llvm.riscv.vdiv.nxv8i16.nxv8i16( , , + , i64); define @intrinsic_vdiv_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { @@ -465,6 +486,7 @@ define @intrinsic_vdiv_vv_nxv8i16_nxv8i16_nxv8i16( 
@llvm.riscv.vdiv.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vdiv.nxv16i16.nxv16i16( , , + , i64); define @intrinsic_vdiv_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { @@ -510,6 +533,7 @@ define @intrinsic_vdiv_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vdiv.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vdiv.nxv32i16.nxv32i16( , , + , i64); define @intrinsic_vdiv_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { @@ -555,6 +580,7 @@ define @intrinsic_vdiv_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vdiv.nxv32i16.nxv32i16( + undef, %0, %1, i64 %2) @@ -591,6 +617,7 @@ entry: declare @llvm.riscv.vdiv.nxv1i32.nxv1i32( , , + , i64); define @intrinsic_vdiv_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { @@ -601,6 +628,7 @@ define @intrinsic_vdiv_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vdiv.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -636,6 +664,7 @@ entry: declare @llvm.riscv.vdiv.nxv2i32.nxv2i32( , , + , i64); define @intrinsic_vdiv_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { @@ -646,6 +675,7 @@ define @intrinsic_vdiv_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vdiv.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vdiv.nxv4i32.nxv4i32( , , + , i64); define @intrinsic_vdiv_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { @@ -691,6 +722,7 @@ define @intrinsic_vdiv_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vdiv.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vdiv.nxv8i32.nxv8i32( , , + , i64); define @intrinsic_vdiv_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { @@ -736,6 +769,7 @@ define @intrinsic_vdiv_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vdiv.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vdiv.nxv16i32.nxv16i32( , , + , i64); define @intrinsic_vdiv_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { @@ -781,6 +816,7 @@ define @intrinsic_vdiv_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vdiv.nxv16i32.nxv16i32( + undef, %0, %1, i64 %2) @@ -817,6 +853,7 @@ entry: declare @llvm.riscv.vdiv.nxv1i64.nxv1i64( , , + , i64); define @intrinsic_vdiv_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { @@ -827,6 +864,7 @@ define @intrinsic_vdiv_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vdiv.nxv1i64.nxv1i64( + undef, %0, %1, i64 %2) @@ -862,6 +900,7 @@ entry: declare @llvm.riscv.vdiv.nxv2i64.nxv2i64( , , + , i64); define @intrinsic_vdiv_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { @@ -872,6 +911,7 @@ define @intrinsic_vdiv_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vdiv.nxv2i64.nxv2i64( + undef, %0, %1, i64 %2) @@ -907,6 +947,7 @@ entry: declare @llvm.riscv.vdiv.nxv4i64.nxv4i64( , , + , i64); define @intrinsic_vdiv_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { @@ -917,6 +958,7 @@ define @intrinsic_vdiv_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vdiv.nxv4i64.nxv4i64( + undef, %0, %1, i64 %2) @@ -952,6 +994,7 @@ entry: declare @llvm.riscv.vdiv.nxv8i64.nxv8i64( , , + , i64); define @intrinsic_vdiv_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { @@ -962,6 +1005,7 @@ define @intrinsic_vdiv_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vdiv.nxv8i64.nxv8i64( + undef, %0, %1, i64 %2) @@ -997,6 +1041,7 @@ entry: declare @llvm.riscv.vdiv.nxv1i8.i8( , + , i8, i64); @@ -1008,6 +1053,7 @@ define @intrinsic_vdiv_vx_nxv1i8_nxv1i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdiv.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1042,6 +1088,7 @@ 
entry: declare @llvm.riscv.vdiv.nxv2i8.i8( , + , i8, i64); @@ -1053,6 +1100,7 @@ define @intrinsic_vdiv_vx_nxv2i8_nxv2i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdiv.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1087,6 +1135,7 @@ entry: declare @llvm.riscv.vdiv.nxv4i8.i8( , + , i8, i64); @@ -1098,6 +1147,7 @@ define @intrinsic_vdiv_vx_nxv4i8_nxv4i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdiv.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1132,6 +1182,7 @@ entry: declare @llvm.riscv.vdiv.nxv8i8.i8( , + , i8, i64); @@ -1143,6 +1194,7 @@ define @intrinsic_vdiv_vx_nxv8i8_nxv8i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdiv.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1177,6 +1229,7 @@ entry: declare @llvm.riscv.vdiv.nxv16i8.i8( , + , i8, i64); @@ -1188,6 +1241,7 @@ define @intrinsic_vdiv_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vdiv.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1222,6 +1276,7 @@ entry: declare @llvm.riscv.vdiv.nxv32i8.i8( , + , i8, i64); @@ -1233,6 +1288,7 @@ define @intrinsic_vdiv_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vdiv.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1267,6 +1323,7 @@ entry: declare @llvm.riscv.vdiv.nxv64i8.i8( , + , i8, i64); @@ -1278,6 +1335,7 @@ define @intrinsic_vdiv_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vdiv.nxv64i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1312,6 +1370,7 @@ entry: declare @llvm.riscv.vdiv.nxv1i16.i16( , + , i16, i64); @@ -1323,6 +1382,7 @@ define @intrinsic_vdiv_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vdiv.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1357,6 +1417,7 @@ entry: declare @llvm.riscv.vdiv.nxv2i16.i16( , + , i16, i64); @@ -1368,6 +1429,7 @@ define @intrinsic_vdiv_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vdiv.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1402,6 +1464,7 @@ entry: declare @llvm.riscv.vdiv.nxv4i16.i16( , + , i16, i64); @@ -1413,6 +1476,7 @@ define @intrinsic_vdiv_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vdiv.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1447,6 +1511,7 @@ entry: declare @llvm.riscv.vdiv.nxv8i16.i16( , + , i16, i64); @@ -1458,6 +1523,7 @@ define @intrinsic_vdiv_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vdiv.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1492,6 +1558,7 @@ entry: declare @llvm.riscv.vdiv.nxv16i16.i16( , + , i16, i64); @@ -1503,6 +1570,7 @@ define @intrinsic_vdiv_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vdiv.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1537,6 +1605,7 @@ entry: declare @llvm.riscv.vdiv.nxv32i16.i16( , + , i16, i64); @@ -1548,6 +1617,7 @@ define @intrinsic_vdiv_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vdiv.nxv32i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1582,6 +1652,7 @@ entry: declare @llvm.riscv.vdiv.nxv1i32.i32( , + , i32, i64); @@ -1593,6 +1664,7 @@ define @intrinsic_vdiv_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vdiv.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1627,6 +1699,7 @@ entry: declare @llvm.riscv.vdiv.nxv2i32.i32( , + , i32, i64); @@ -1638,6 +1711,7 @@ define @intrinsic_vdiv_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vdiv.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1672,6 +1746,7 @@ entry: declare @llvm.riscv.vdiv.nxv4i32.i32( , + , i32, i64); @@ -1683,6 +1758,7 @@ define @intrinsic_vdiv_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vdiv.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1717,6 +1793,7 @@ entry: declare @llvm.riscv.vdiv.nxv8i32.i32( , + , i32, i64); @@ -1728,6 +1805,7 @@ define @intrinsic_vdiv_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vdiv.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1762,6 +1840,7 @@ entry: declare @llvm.riscv.vdiv.nxv16i32.i32( , + , i32, i64); @@ -1773,6 +1852,7 @@ 
define @intrinsic_vdiv_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vdiv.nxv16i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1807,6 +1887,7 @@ entry: declare @llvm.riscv.vdiv.nxv1i64.i64( , + , i64, i64); @@ -1818,6 +1899,7 @@ define @intrinsic_vdiv_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vdiv.nxv1i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1852,6 +1934,7 @@ entry: declare @llvm.riscv.vdiv.nxv2i64.i64( , + , i64, i64); @@ -1863,6 +1946,7 @@ define @intrinsic_vdiv_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vdiv.nxv2i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1897,6 +1981,7 @@ entry: declare @llvm.riscv.vdiv.nxv4i64.i64( , + , i64, i64); @@ -1908,6 +1993,7 @@ define @intrinsic_vdiv_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vdiv.nxv4i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1942,6 +2028,7 @@ entry: declare @llvm.riscv.vdiv.nxv8i64.i64( , + , i64, i64); @@ -1953,6 +2040,7 @@ define @intrinsic_vdiv_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vdiv.nxv8i64.i64( + undef, %0, i64 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll index b86b902..da95824 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll @@ -4,6 +4,7 @@ declare @llvm.riscv.vdivu.nxv1i8.nxv1i8( , , + , i32); define @intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { @@ -14,6 +15,7 @@ define @intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vdivu.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -49,6 +51,7 @@ entry: declare @llvm.riscv.vdivu.nxv2i8.nxv2i8( , , + , i32); define @intrinsic_vdivu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { @@ -59,6 +62,7 @@ define @intrinsic_vdivu_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vdivu.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -94,6 +98,7 @@ entry: declare @llvm.riscv.vdivu.nxv4i8.nxv4i8( , , + , i32); define @intrinsic_vdivu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { @@ -104,6 +109,7 @@ define @intrinsic_vdivu_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vdivu.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -139,6 +145,7 @@ entry: declare @llvm.riscv.vdivu.nxv8i8.nxv8i8( , , + , i32); define @intrinsic_vdivu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { @@ -149,6 +156,7 @@ define @intrinsic_vdivu_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vdivu.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -184,6 +192,7 @@ entry: declare @llvm.riscv.vdivu.nxv16i8.nxv16i8( , , + , i32); define @intrinsic_vdivu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { @@ -194,6 +203,7 @@ define @intrinsic_vdivu_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vdivu.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -229,6 +239,7 @@ entry: declare @llvm.riscv.vdivu.nxv32i8.nxv32i8( , , + , i32); define @intrinsic_vdivu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { @@ -239,6 +250,7 @@ define @intrinsic_vdivu_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vdivu.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -274,6 +286,7 @@ entry: declare @llvm.riscv.vdivu.nxv64i8.nxv64i8( , , + , i32); define @intrinsic_vdivu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { @@ -284,6 +297,7 @@ define @intrinsic_vdivu_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vdivu.nxv64i8.nxv64i8( + undef, %0, %1, i32 %2) @@ -320,6 +334,7 @@ entry: declare @llvm.riscv.vdivu.nxv1i16.nxv1i16( , , + , i32); define @intrinsic_vdivu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { @@ -330,6 +345,7 @@ define @intrinsic_vdivu_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vdivu.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -365,6 +381,7 @@ entry: declare @llvm.riscv.vdivu.nxv2i16.nxv2i16( , 
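For reference, a minimal sketch (not part of the patch) of the updated unmasked vector-vector form, reusing the nxv1i8 vdivu variant declared in the test above; the function name @example_vdivu_vv is hypothetical. The intrinsic now takes a leading passthru operand, and passing undef selects the tail-agnostic policy described in the commit message.

declare <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i32);

define <vscale x 1 x i8> @example_vdivu_vv(<vscale x 1 x i8> %x, <vscale x 1 x i8> %y, i32 %vl) nounwind {
entry:
  ; undef passthru => tail agnostic
  %r = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %x,
    <vscale x 1 x i8> %y,
    i32 %vl)
  ret <vscale x 1 x i8> %r
}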
, + , i32); define @intrinsic_vdivu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { @@ -375,6 +392,7 @@ define @intrinsic_vdivu_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vdivu.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -410,6 +428,7 @@ entry: declare @llvm.riscv.vdivu.nxv4i16.nxv4i16( , , + , i32); define @intrinsic_vdivu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { @@ -420,6 +439,7 @@ define @intrinsic_vdivu_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vdivu.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -455,6 +475,7 @@ entry: declare @llvm.riscv.vdivu.nxv8i16.nxv8i16( , , + , i32); define @intrinsic_vdivu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { @@ -465,6 +486,7 @@ define @intrinsic_vdivu_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vdivu.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vdivu.nxv16i16.nxv16i16( , , + , i32); define @intrinsic_vdivu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { @@ -510,6 +533,7 @@ define @intrinsic_vdivu_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vdivu.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vdivu.nxv32i16.nxv32i16( , , + , i32); define @intrinsic_vdivu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { @@ -555,6 +580,7 @@ define @intrinsic_vdivu_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vdivu.nxv32i16.nxv32i16( + undef, %0, %1, i32 %2) @@ -591,6 +617,7 @@ entry: declare @llvm.riscv.vdivu.nxv1i32.nxv1i32( , , + , i32); define @intrinsic_vdivu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { @@ -601,6 +628,7 @@ define @intrinsic_vdivu_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vdivu.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -636,6 +664,7 @@ entry: declare @llvm.riscv.vdivu.nxv2i32.nxv2i32( , , + , i32); define @intrinsic_vdivu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { @@ -646,6 +675,7 @@ define @intrinsic_vdivu_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vdivu.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vdivu.nxv4i32.nxv4i32( , , + , i32); define @intrinsic_vdivu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { @@ -691,6 +722,7 @@ define @intrinsic_vdivu_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vdivu.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vdivu.nxv8i32.nxv8i32( , , + , i32); define @intrinsic_vdivu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { @@ -736,6 +769,7 @@ define @intrinsic_vdivu_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vdivu.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vdivu.nxv16i32.nxv16i32( , , + , i32); define @intrinsic_vdivu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { @@ -781,6 +816,7 @@ define @intrinsic_vdivu_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vdivu.nxv16i32.nxv16i32( + undef, %0, %1, i32 %2) @@ -817,6 +853,7 @@ entry: declare @llvm.riscv.vdivu.nxv1i64.nxv1i64( , , + , i32); define @intrinsic_vdivu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { @@ -827,6 +864,7 @@ define @intrinsic_vdivu_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vdivu.nxv1i64.nxv1i64( + undef, %0, %1, i32 %2) @@ -862,6 +900,7 @@ entry: declare @llvm.riscv.vdivu.nxv2i64.nxv2i64( , , + , i32); define @intrinsic_vdivu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { @@ -872,6 +911,7 @@ define @intrinsic_vdivu_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vdivu.nxv2i64.nxv2i64( + undef, %0, %1, i32 %2) @@ -907,6 +947,7 @@ entry: declare 
@llvm.riscv.vdivu.nxv4i64.nxv4i64( , , + , i32); define @intrinsic_vdivu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { @@ -917,6 +958,7 @@ define @intrinsic_vdivu_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vdivu.nxv4i64.nxv4i64( + undef, %0, %1, i32 %2) @@ -952,6 +994,7 @@ entry: declare @llvm.riscv.vdivu.nxv8i64.nxv8i64( , , + , i32); define @intrinsic_vdivu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { @@ -962,6 +1005,7 @@ define @intrinsic_vdivu_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vdivu.nxv8i64.nxv8i64( + undef, %0, %1, i32 %2) @@ -997,6 +1041,7 @@ entry: declare @llvm.riscv.vdivu.nxv1i8.i8( , + , i8, i32); @@ -1008,6 +1053,7 @@ define @intrinsic_vdivu_vx_nxv1i8_nxv1i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1042,6 +1088,7 @@ entry: declare @llvm.riscv.vdivu.nxv2i8.i8( , + , i8, i32); @@ -1053,6 +1100,7 @@ define @intrinsic_vdivu_vx_nxv2i8_nxv2i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1087,6 +1135,7 @@ entry: declare @llvm.riscv.vdivu.nxv4i8.i8( , + , i8, i32); @@ -1098,6 +1147,7 @@ define @intrinsic_vdivu_vx_nxv4i8_nxv4i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1132,6 +1182,7 @@ entry: declare @llvm.riscv.vdivu.nxv8i8.i8( , + , i8, i32); @@ -1143,6 +1194,7 @@ define @intrinsic_vdivu_vx_nxv8i8_nxv8i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1177,6 +1229,7 @@ entry: declare @llvm.riscv.vdivu.nxv16i8.i8( , + , i8, i32); @@ -1188,6 +1241,7 @@ define @intrinsic_vdivu_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vdivu.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1222,6 +1276,7 @@ entry: declare @llvm.riscv.vdivu.nxv32i8.i8( , + , i8, i32); @@ -1233,6 +1288,7 @@ define @intrinsic_vdivu_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vdivu.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1267,6 +1323,7 @@ entry: declare @llvm.riscv.vdivu.nxv64i8.i8( , + , i8, i32); @@ -1278,6 +1335,7 @@ define @intrinsic_vdivu_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vdivu.nxv64i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1312,6 +1370,7 @@ entry: declare @llvm.riscv.vdivu.nxv1i16.i16( , + , i16, i32); @@ -1323,6 +1382,7 @@ define @intrinsic_vdivu_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vdivu.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1357,6 +1417,7 @@ entry: declare @llvm.riscv.vdivu.nxv2i16.i16( , + , i16, i32); @@ -1368,6 +1429,7 @@ define @intrinsic_vdivu_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vdivu.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1402,6 +1464,7 @@ entry: declare @llvm.riscv.vdivu.nxv4i16.i16( , + , i16, i32); @@ -1413,6 +1476,7 @@ define @intrinsic_vdivu_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vdivu.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1447,6 +1511,7 @@ entry: declare @llvm.riscv.vdivu.nxv8i16.i16( , + , i16, i32); @@ -1458,6 +1523,7 @@ define @intrinsic_vdivu_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vdivu.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1492,6 +1558,7 @@ entry: declare @llvm.riscv.vdivu.nxv16i16.i16( , + , i16, i32); @@ -1503,6 +1570,7 @@ define @intrinsic_vdivu_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vdivu.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1537,6 +1605,7 @@ entry: declare @llvm.riscv.vdivu.nxv32i16.i16( , + , i16, i32); @@ -1548,6 +1617,7 @@ define @intrinsic_vdivu_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vdivu.nxv32i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1582,6 +1652,7 @@ entry: declare @llvm.riscv.vdivu.nxv1i32.i32( , + , i32, i32); @@ -1593,6 +1664,7 @@ 
define @intrinsic_vdivu_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vdivu.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1627,6 +1699,7 @@ entry: declare @llvm.riscv.vdivu.nxv2i32.i32( , + , i32, i32); @@ -1638,6 +1711,7 @@ define @intrinsic_vdivu_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vdivu.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1672,6 +1746,7 @@ entry: declare @llvm.riscv.vdivu.nxv4i32.i32( , + , i32, i32); @@ -1683,6 +1758,7 @@ define @intrinsic_vdivu_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vdivu.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1717,6 +1793,7 @@ entry: declare @llvm.riscv.vdivu.nxv8i32.i32( , + , i32, i32); @@ -1728,6 +1805,7 @@ define @intrinsic_vdivu_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vdivu.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1762,6 +1840,7 @@ entry: declare @llvm.riscv.vdivu.nxv16i32.i32( , + , i32, i32); @@ -1773,6 +1852,7 @@ define @intrinsic_vdivu_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vdivu.nxv16i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1807,6 +1887,7 @@ entry: declare @llvm.riscv.vdivu.nxv1i64.i64( , + , i64, i32); @@ -1824,6 +1905,7 @@ define @intrinsic_vdivu_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vdivu.nxv1i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1864,6 +1946,7 @@ entry: declare @llvm.riscv.vdivu.nxv2i64.i64( , + , i64, i32); @@ -1881,6 +1964,7 @@ define @intrinsic_vdivu_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vdivu.nxv2i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1921,6 +2005,7 @@ entry: declare @llvm.riscv.vdivu.nxv4i64.i64( , + , i64, i32); @@ -1938,6 +2023,7 @@ define @intrinsic_vdivu_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vdivu.nxv4i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1978,6 +2064,7 @@ entry: declare @llvm.riscv.vdivu.nxv8i64.i64( , + , i64, i32); @@ -1995,6 +2082,7 @@ define @intrinsic_vdivu_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vdivu.nxv8i64.i64( + undef, %0, i64 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll index 7efea7c..d5355e8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll @@ -4,6 +4,7 @@ declare @llvm.riscv.vdivu.nxv1i8.nxv1i8( , , + , i64); define @intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { @@ -14,6 +15,7 @@ define @intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vdivu.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -49,6 +51,7 @@ entry: declare @llvm.riscv.vdivu.nxv2i8.nxv2i8( , , + , i64); define @intrinsic_vdivu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { @@ -59,6 +62,7 @@ define @intrinsic_vdivu_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vdivu.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -94,6 +98,7 @@ entry: declare @llvm.riscv.vdivu.nxv4i8.nxv4i8( , , + , i64); define @intrinsic_vdivu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { @@ -104,6 +109,7 @@ define @intrinsic_vdivu_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vdivu.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -139,6 +145,7 @@ entry: declare @llvm.riscv.vdivu.nxv8i8.nxv8i8( , , + , i64); define @intrinsic_vdivu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { @@ -149,6 +156,7 @@ define @intrinsic_vdivu_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vdivu.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -184,6 +192,7 @@ entry: declare @llvm.riscv.vdivu.nxv16i8.nxv16i8( , , + , i64); define @intrinsic_vdivu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { @@ -194,6 +203,7 @@ define @intrinsic_vdivu_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vdivu.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -229,6 +239,7 @@ entry: declare @llvm.riscv.vdivu.nxv32i8.nxv32i8( , , + , 
i64); define @intrinsic_vdivu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { @@ -239,6 +250,7 @@ define @intrinsic_vdivu_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vdivu.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -274,6 +286,7 @@ entry: declare @llvm.riscv.vdivu.nxv64i8.nxv64i8( , , + , i64); define @intrinsic_vdivu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { @@ -284,6 +297,7 @@ define @intrinsic_vdivu_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vdivu.nxv64i8.nxv64i8( + undef, %0, %1, i64 %2) @@ -320,6 +334,7 @@ entry: declare @llvm.riscv.vdivu.nxv1i16.nxv1i16( , , + , i64); define @intrinsic_vdivu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { @@ -330,6 +345,7 @@ define @intrinsic_vdivu_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vdivu.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -365,6 +381,7 @@ entry: declare @llvm.riscv.vdivu.nxv2i16.nxv2i16( , , + , i64); define @intrinsic_vdivu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { @@ -375,6 +392,7 @@ define @intrinsic_vdivu_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vdivu.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -410,6 +428,7 @@ entry: declare @llvm.riscv.vdivu.nxv4i16.nxv4i16( , , + , i64); define @intrinsic_vdivu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { @@ -420,6 +439,7 @@ define @intrinsic_vdivu_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vdivu.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -455,6 +475,7 @@ entry: declare @llvm.riscv.vdivu.nxv8i16.nxv8i16( , , + , i64); define @intrinsic_vdivu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { @@ -465,6 +486,7 @@ define @intrinsic_vdivu_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vdivu.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vdivu.nxv16i16.nxv16i16( , , + , i64); define @intrinsic_vdivu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { @@ -510,6 +533,7 @@ define @intrinsic_vdivu_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vdivu.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vdivu.nxv32i16.nxv32i16( , , + , i64); define @intrinsic_vdivu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { @@ -555,6 +580,7 @@ define @intrinsic_vdivu_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vdivu.nxv32i16.nxv32i16( + undef, %0, %1, i64 %2) @@ -591,6 +617,7 @@ entry: declare @llvm.riscv.vdivu.nxv1i32.nxv1i32( , , + , i64); define @intrinsic_vdivu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { @@ -601,6 +628,7 @@ define @intrinsic_vdivu_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vdivu.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -636,6 +664,7 @@ entry: declare @llvm.riscv.vdivu.nxv2i32.nxv2i32( , , + , i64); define @intrinsic_vdivu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { @@ -646,6 +675,7 @@ define @intrinsic_vdivu_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vdivu.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vdivu.nxv4i32.nxv4i32( , , + , i64); define @intrinsic_vdivu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { @@ -691,6 +722,7 @@ define @intrinsic_vdivu_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vdivu.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vdivu.nxv8i32.nxv8i32( , , + , i64); define @intrinsic_vdivu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { @@ -736,6 +769,7 @@ define @intrinsic_vdivu_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vdivu.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vdivu.nxv16i32.nxv16i32( 
, , + , i64); define @intrinsic_vdivu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { @@ -781,6 +816,7 @@ define @intrinsic_vdivu_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vdivu.nxv16i32.nxv16i32( + undef, %0, %1, i64 %2) @@ -817,6 +853,7 @@ entry: declare @llvm.riscv.vdivu.nxv1i64.nxv1i64( , , + , i64); define @intrinsic_vdivu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { @@ -827,6 +864,7 @@ define @intrinsic_vdivu_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vdivu.nxv1i64.nxv1i64( + undef, %0, %1, i64 %2) @@ -862,6 +900,7 @@ entry: declare @llvm.riscv.vdivu.nxv2i64.nxv2i64( , , + , i64); define @intrinsic_vdivu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { @@ -872,6 +911,7 @@ define @intrinsic_vdivu_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vdivu.nxv2i64.nxv2i64( + undef, %0, %1, i64 %2) @@ -907,6 +947,7 @@ entry: declare @llvm.riscv.vdivu.nxv4i64.nxv4i64( , , + , i64); define @intrinsic_vdivu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { @@ -917,6 +958,7 @@ define @intrinsic_vdivu_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vdivu.nxv4i64.nxv4i64( + undef, %0, %1, i64 %2) @@ -952,6 +994,7 @@ entry: declare @llvm.riscv.vdivu.nxv8i64.nxv8i64( , , + , i64); define @intrinsic_vdivu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { @@ -962,6 +1005,7 @@ define @intrinsic_vdivu_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vdivu.nxv8i64.nxv8i64( + undef, %0, %1, i64 %2) @@ -997,6 +1041,7 @@ entry: declare @llvm.riscv.vdivu.nxv1i8.i8( , + , i8, i64); @@ -1008,6 +1053,7 @@ define @intrinsic_vdivu_vx_nxv1i8_nxv1i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1042,6 +1088,7 @@ entry: declare @llvm.riscv.vdivu.nxv2i8.i8( , + , i8, i64); @@ -1053,6 +1100,7 @@ define @intrinsic_vdivu_vx_nxv2i8_nxv2i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1087,6 +1135,7 @@ entry: declare @llvm.riscv.vdivu.nxv4i8.i8( , + , i8, i64); @@ -1098,6 +1147,7 @@ define @intrinsic_vdivu_vx_nxv4i8_nxv4i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1132,6 +1182,7 @@ entry: declare @llvm.riscv.vdivu.nxv8i8.i8( , + , i8, i64); @@ -1143,6 +1194,7 @@ define @intrinsic_vdivu_vx_nxv8i8_nxv8i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1177,6 +1229,7 @@ entry: declare @llvm.riscv.vdivu.nxv16i8.i8( , + , i8, i64); @@ -1188,6 +1241,7 @@ define @intrinsic_vdivu_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vdivu.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1222,6 +1276,7 @@ entry: declare @llvm.riscv.vdivu.nxv32i8.i8( , + , i8, i64); @@ -1233,6 +1288,7 @@ define @intrinsic_vdivu_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vdivu.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1267,6 +1323,7 @@ entry: declare @llvm.riscv.vdivu.nxv64i8.i8( , + , i8, i64); @@ -1278,6 +1335,7 @@ define @intrinsic_vdivu_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vdivu.nxv64i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1312,6 +1370,7 @@ entry: declare @llvm.riscv.vdivu.nxv1i16.i16( , + , i16, i64); @@ -1323,6 +1382,7 @@ define @intrinsic_vdivu_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vdivu.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1357,6 +1417,7 @@ entry: declare @llvm.riscv.vdivu.nxv2i16.i16( , + , i16, i64); @@ -1368,6 +1429,7 @@ define @intrinsic_vdivu_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vdivu.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1402,6 +1464,7 @@ entry: declare @llvm.riscv.vdivu.nxv4i16.i16( , + , i16, i64); @@ -1413,6 +1476,7 @@ define 
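The vector-scalar (.vx) variants follow the same pattern. A minimal sketch under the same assumptions (hypothetical function name; rv64, so the vl operand is i64); here a non-undef passthru is passed, which per the commit message requests tail undisturbed rather than tail agnostic.

declare <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i16,
  i64);

define <vscale x 1 x i16> @example_vdivu_vx_tu(<vscale x 1 x i16> %passthru, <vscale x 1 x i16> %x, i16 %s, i64 %vl) nounwind {
entry:
  ; non-undef passthru => tail undisturbed: tail elements come from %passthru
  %r = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.i16(
    <vscale x 1 x i16> %passthru,
    <vscale x 1 x i16> %x,
    i16 %s,
    i64 %vl)
  ret <vscale x 1 x i16> %r
}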
@intrinsic_vdivu_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vdivu.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1447,6 +1511,7 @@ entry: declare @llvm.riscv.vdivu.nxv8i16.i16( , + , i16, i64); @@ -1458,6 +1523,7 @@ define @intrinsic_vdivu_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vdivu.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1492,6 +1558,7 @@ entry: declare @llvm.riscv.vdivu.nxv16i16.i16( , + , i16, i64); @@ -1503,6 +1570,7 @@ define @intrinsic_vdivu_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vdivu.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1537,6 +1605,7 @@ entry: declare @llvm.riscv.vdivu.nxv32i16.i16( , + , i16, i64); @@ -1548,6 +1617,7 @@ define @intrinsic_vdivu_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vdivu.nxv32i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1582,6 +1652,7 @@ entry: declare @llvm.riscv.vdivu.nxv1i32.i32( , + , i32, i64); @@ -1593,6 +1664,7 @@ define @intrinsic_vdivu_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vdivu.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1627,6 +1699,7 @@ entry: declare @llvm.riscv.vdivu.nxv2i32.i32( , + , i32, i64); @@ -1638,6 +1711,7 @@ define @intrinsic_vdivu_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vdivu.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1672,6 +1746,7 @@ entry: declare @llvm.riscv.vdivu.nxv4i32.i32( , + , i32, i64); @@ -1683,6 +1758,7 @@ define @intrinsic_vdivu_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vdivu.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1717,6 +1793,7 @@ entry: declare @llvm.riscv.vdivu.nxv8i32.i32( , + , i32, i64); @@ -1728,6 +1805,7 @@ define @intrinsic_vdivu_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vdivu.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1762,6 +1840,7 @@ entry: declare @llvm.riscv.vdivu.nxv16i32.i32( , + , i32, i64); @@ -1773,6 +1852,7 @@ define @intrinsic_vdivu_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vdivu.nxv16i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1807,6 +1887,7 @@ entry: declare @llvm.riscv.vdivu.nxv1i64.i64( , + , i64, i64); @@ -1818,6 +1899,7 @@ define @intrinsic_vdivu_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vdivu.nxv1i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1852,6 +1934,7 @@ entry: declare @llvm.riscv.vdivu.nxv2i64.i64( , + , i64, i64); @@ -1863,6 +1946,7 @@ define @intrinsic_vdivu_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vdivu.nxv2i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1897,6 +1981,7 @@ entry: declare @llvm.riscv.vdivu.nxv4i64.i64( , + , i64, i64); @@ -1908,6 +1993,7 @@ define @intrinsic_vdivu_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vdivu.nxv4i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1942,6 +2028,7 @@ entry: declare @llvm.riscv.vdivu.nxv8i64.i64( , + , i64, i64); @@ -1953,6 +2040,7 @@ define @intrinsic_vdivu_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vdivu.nxv8i64.i64( + undef, %0, i64 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd.ll index 041580b..817c251 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfadd.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfadd.ll @@ -6,6 +6,7 @@ declare @llvm.riscv.vfadd.nxv1f16.nxv1f16( , , + , iXLen); define @intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { @@ -16,6 +17,7 @@ define @intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16( @llvm.riscv.vfadd.nxv1f16.nxv1f16( + undef, %0, %1, iXLen %2) @@ -51,6 +53,7 @@ entry: declare @llvm.riscv.vfadd.nxv2f16.nxv2f16( , , + , iXLen); define @intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { @@ -61,6 +64,7 @@ define @intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16( @llvm.riscv.vfadd.nxv2f16.nxv2f16( + undef, %0, %1, iXLen %2) @@ -96,6 +100,7 @@ entry: declare @llvm.riscv.vfadd.nxv4f16.nxv4f16( , 
, + , iXLen); define @intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { @@ -106,6 +111,7 @@ define @intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16( @llvm.riscv.vfadd.nxv4f16.nxv4f16( + undef, %0, %1, iXLen %2) @@ -141,6 +147,7 @@ entry: declare @llvm.riscv.vfadd.nxv8f16.nxv8f16( , , + , iXLen); define @intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { @@ -151,6 +158,7 @@ define @intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16( @llvm.riscv.vfadd.nxv8f16.nxv8f16( + undef, %0, %1, iXLen %2) @@ -186,6 +194,7 @@ entry: declare @llvm.riscv.vfadd.nxv16f16.nxv16f16( , , + , iXLen); define @intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { @@ -196,6 +205,7 @@ define @intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16( @llvm.riscv.vfadd.nxv16f16.nxv16f16( + undef, %0, %1, iXLen %2) @@ -231,6 +241,7 @@ entry: declare @llvm.riscv.vfadd.nxv32f16.nxv32f16( , , + , iXLen); define @intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { @@ -241,6 +252,7 @@ define @intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16( @llvm.riscv.vfadd.nxv32f16.nxv32f16( + undef, %0, %1, iXLen %2) @@ -277,6 +289,7 @@ entry: declare @llvm.riscv.vfadd.nxv1f32.nxv1f32( , , + , iXLen); define @intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { @@ -287,6 +300,7 @@ define @intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32( @llvm.riscv.vfadd.nxv1f32.nxv1f32( + undef, %0, %1, iXLen %2) @@ -322,6 +336,7 @@ entry: declare @llvm.riscv.vfadd.nxv2f32.nxv2f32( , , + , iXLen); define @intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { @@ -332,6 +347,7 @@ define @intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32( @llvm.riscv.vfadd.nxv2f32.nxv2f32( + undef, %0, %1, iXLen %2) @@ -367,6 +383,7 @@ entry: declare @llvm.riscv.vfadd.nxv4f32.nxv4f32( , , + , iXLen); define @intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { @@ -377,6 +394,7 @@ define @intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32( @llvm.riscv.vfadd.nxv4f32.nxv4f32( + undef, %0, %1, iXLen %2) @@ -412,6 +430,7 @@ entry: declare @llvm.riscv.vfadd.nxv8f32.nxv8f32( , , + , iXLen); define @intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { @@ -422,6 +441,7 @@ define @intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32( @llvm.riscv.vfadd.nxv8f32.nxv8f32( + undef, %0, %1, iXLen %2) @@ -457,6 +477,7 @@ entry: declare @llvm.riscv.vfadd.nxv16f32.nxv16f32( , , + , iXLen); define @intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { @@ -467,6 +488,7 @@ define @intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32( @llvm.riscv.vfadd.nxv16f32.nxv16f32( + undef, %0, %1, iXLen %2) @@ -503,6 +525,7 @@ entry: declare @llvm.riscv.vfadd.nxv1f64.nxv1f64( , , + , iXLen); define @intrinsic_vfadd_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { @@ -513,6 +536,7 @@ define @intrinsic_vfadd_vv_nxv1f64_nxv1f64_nxv1f64( @llvm.riscv.vfadd.nxv1f64.nxv1f64( + undef, %0, %1, iXLen %2) @@ -548,6 +572,7 @@ entry: declare @llvm.riscv.vfadd.nxv2f64.nxv2f64( , , + , iXLen); define @intrinsic_vfadd_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { @@ -558,6 +583,7 @@ define @intrinsic_vfadd_vv_nxv2f64_nxv2f64_nxv2f64( @llvm.riscv.vfadd.nxv2f64.nxv2f64( + undef, %0, %1, iXLen %2) @@ -593,6 +619,7 @@ entry: declare @llvm.riscv.vfadd.nxv4f64.nxv4f64( , , + , iXLen); define @intrinsic_vfadd_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { @@ -603,6 +630,7 @@ define @intrinsic_vfadd_vv_nxv4f64_nxv4f64_nxv4f64( @llvm.riscv.vfadd.nxv4f64.nxv4f64( + undef, 
%0, %1, iXLen %2) @@ -638,6 +666,7 @@ entry: declare @llvm.riscv.vfadd.nxv8f64.nxv8f64( , , + , iXLen); define @intrinsic_vfadd_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { @@ -648,6 +677,7 @@ define @intrinsic_vfadd_vv_nxv8f64_nxv8f64_nxv8f64( @llvm.riscv.vfadd.nxv8f64.nxv8f64( + undef, %0, %1, iXLen %2) @@ -683,6 +713,7 @@ entry: declare @llvm.riscv.vfadd.nxv1f16.f16( , + , half, iXLen); @@ -694,6 +725,7 @@ define @intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16( @llvm.riscv.vfadd.nxv1f16.f16( + undef, %0, half %1, iXLen %2) @@ -728,6 +760,7 @@ entry: declare @llvm.riscv.vfadd.nxv2f16.f16( , + , half, iXLen); @@ -739,6 +772,7 @@ define @intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16( @llvm.riscv.vfadd.nxv2f16.f16( + undef, %0, half %1, iXLen %2) @@ -773,6 +807,7 @@ entry: declare @llvm.riscv.vfadd.nxv4f16.f16( , + , half, iXLen); @@ -784,6 +819,7 @@ define @intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16( @llvm.riscv.vfadd.nxv4f16.f16( + undef, %0, half %1, iXLen %2) @@ -818,6 +854,7 @@ entry: declare @llvm.riscv.vfadd.nxv8f16.f16( , + , half, iXLen); @@ -829,6 +866,7 @@ define @intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16( @llvm.riscv.vfadd.nxv8f16.f16( + undef, %0, half %1, iXLen %2) @@ -863,6 +901,7 @@ entry: declare @llvm.riscv.vfadd.nxv16f16.f16( , + , half, iXLen); @@ -874,6 +913,7 @@ define @intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16( @llvm.riscv.vfadd.nxv16f16.f16( + undef, %0, half %1, iXLen %2) @@ -908,6 +948,7 @@ entry: declare @llvm.riscv.vfadd.nxv32f16.f16( , + , half, iXLen); @@ -919,6 +960,7 @@ define @intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16( @llvm.riscv.vfadd.nxv32f16.f16( + undef, %0, half %1, iXLen %2) @@ -953,6 +995,7 @@ entry: declare @llvm.riscv.vfadd.nxv1f32.f32( , + , float, iXLen); @@ -964,6 +1007,7 @@ define @intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32( @llvm.riscv.vfadd.nxv1f32.f32( + undef, %0, float %1, iXLen %2) @@ -998,6 +1042,7 @@ entry: declare @llvm.riscv.vfadd.nxv2f32.f32( , + , float, iXLen); @@ -1009,6 +1054,7 @@ define @intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32( @llvm.riscv.vfadd.nxv2f32.f32( + undef, %0, float %1, iXLen %2) @@ -1043,6 +1089,7 @@ entry: declare @llvm.riscv.vfadd.nxv4f32.f32( , + , float, iXLen); @@ -1054,6 +1101,7 @@ define @intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32( @llvm.riscv.vfadd.nxv4f32.f32( + undef, %0, float %1, iXLen %2) @@ -1088,6 +1136,7 @@ entry: declare @llvm.riscv.vfadd.nxv8f32.f32( , + , float, iXLen); @@ -1099,6 +1148,7 @@ define @intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32( @llvm.riscv.vfadd.nxv8f32.f32( + undef, %0, float %1, iXLen %2) @@ -1133,6 +1183,7 @@ entry: declare @llvm.riscv.vfadd.nxv16f32.f32( , + , float, iXLen); @@ -1144,6 +1195,7 @@ define @intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32( @llvm.riscv.vfadd.nxv16f32.f32( + undef, %0, float %1, iXLen %2) @@ -1178,6 +1230,7 @@ entry: declare @llvm.riscv.vfadd.nxv1f64.f64( , + , double, iXLen); @@ -1189,6 +1242,7 @@ define @intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64( @llvm.riscv.vfadd.nxv1f64.f64( + undef, %0, double %1, iXLen %2) @@ -1223,6 +1277,7 @@ entry: declare @llvm.riscv.vfadd.nxv2f64.f64( , + , double, iXLen); @@ -1234,6 +1289,7 @@ define @intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64( @llvm.riscv.vfadd.nxv2f64.f64( + undef, %0, double %1, iXLen %2) @@ -1268,6 +1324,7 @@ entry: declare @llvm.riscv.vfadd.nxv4f64.f64( , + , double, iXLen); @@ -1279,6 +1336,7 @@ define @intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64( @llvm.riscv.vfadd.nxv4f64.f64( + undef, %0, double %1, iXLen %2) @@ -1313,6 +1371,7 @@ entry: declare @llvm.riscv.vfadd.nxv8f64.f64( , + , double, iXLen); @@ -1324,6 +1383,7 @@ define 
@intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64( @llvm.riscv.vfadd.nxv8f64.f64( + undef, %0, double %1, iXLen %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv.ll index 0145f2a..c0e4b5e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfdiv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv.ll @@ -6,6 +6,7 @@ declare @llvm.riscv.vfdiv.nxv1f16.nxv1f16( , , + , iXLen); define @intrinsic_vfdiv_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { @@ -16,6 +17,7 @@ define @intrinsic_vfdiv_vv_nxv1f16_nxv1f16_nxv1f16( @llvm.riscv.vfdiv.nxv1f16.nxv1f16( + undef, %0, %1, iXLen %2) @@ -51,6 +53,7 @@ entry: declare @llvm.riscv.vfdiv.nxv2f16.nxv2f16( , , + , iXLen); define @intrinsic_vfdiv_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { @@ -61,6 +64,7 @@ define @intrinsic_vfdiv_vv_nxv2f16_nxv2f16_nxv2f16( @llvm.riscv.vfdiv.nxv2f16.nxv2f16( + undef, %0, %1, iXLen %2) @@ -96,6 +100,7 @@ entry: declare @llvm.riscv.vfdiv.nxv4f16.nxv4f16( , , + , iXLen); define @intrinsic_vfdiv_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { @@ -106,6 +111,7 @@ define @intrinsic_vfdiv_vv_nxv4f16_nxv4f16_nxv4f16( @llvm.riscv.vfdiv.nxv4f16.nxv4f16( + undef, %0, %1, iXLen %2) @@ -141,6 +147,7 @@ entry: declare @llvm.riscv.vfdiv.nxv8f16.nxv8f16( , , + , iXLen); define @intrinsic_vfdiv_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { @@ -151,6 +158,7 @@ define @intrinsic_vfdiv_vv_nxv8f16_nxv8f16_nxv8f16( @llvm.riscv.vfdiv.nxv8f16.nxv8f16( + undef, %0, %1, iXLen %2) @@ -186,6 +194,7 @@ entry: declare @llvm.riscv.vfdiv.nxv16f16.nxv16f16( , , + , iXLen); define @intrinsic_vfdiv_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { @@ -196,6 +205,7 @@ define @intrinsic_vfdiv_vv_nxv16f16_nxv16f16_nxv16f16( @llvm.riscv.vfdiv.nxv16f16.nxv16f16( + undef, %0, %1, iXLen %2) @@ -231,6 +241,7 @@ entry: declare @llvm.riscv.vfdiv.nxv32f16.nxv32f16( , , + , iXLen); define @intrinsic_vfdiv_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { @@ -241,6 +252,7 @@ define @intrinsic_vfdiv_vv_nxv32f16_nxv32f16_nxv32f16( @llvm.riscv.vfdiv.nxv32f16.nxv32f16( + undef, %0, %1, iXLen %2) @@ -277,6 +289,7 @@ entry: declare @llvm.riscv.vfdiv.nxv1f32.nxv1f32( , , + , iXLen); define @intrinsic_vfdiv_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { @@ -287,6 +300,7 @@ define @intrinsic_vfdiv_vv_nxv1f32_nxv1f32_nxv1f32( @llvm.riscv.vfdiv.nxv1f32.nxv1f32( + undef, %0, %1, iXLen %2) @@ -322,6 +336,7 @@ entry: declare @llvm.riscv.vfdiv.nxv2f32.nxv2f32( , , + , iXLen); define @intrinsic_vfdiv_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { @@ -332,6 +347,7 @@ define @intrinsic_vfdiv_vv_nxv2f32_nxv2f32_nxv2f32( @llvm.riscv.vfdiv.nxv2f32.nxv2f32( + undef, %0, %1, iXLen %2) @@ -367,6 +383,7 @@ entry: declare @llvm.riscv.vfdiv.nxv4f32.nxv4f32( , , + , iXLen); define @intrinsic_vfdiv_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { @@ -377,6 +394,7 @@ define @intrinsic_vfdiv_vv_nxv4f32_nxv4f32_nxv4f32( @llvm.riscv.vfdiv.nxv4f32.nxv4f32( + undef, %0, %1, iXLen %2) @@ -412,6 +430,7 @@ entry: declare @llvm.riscv.vfdiv.nxv8f32.nxv8f32( , , + , iXLen); define @intrinsic_vfdiv_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { @@ -422,6 +441,7 @@ define @intrinsic_vfdiv_vv_nxv8f32_nxv8f32_nxv8f32( @llvm.riscv.vfdiv.nxv8f32.nxv8f32( + undef, %0, %1, iXLen %2) @@ -457,6 +477,7 @@ entry: declare @llvm.riscv.vfdiv.nxv16f32.nxv16f32( , , + , iXLen); define @intrinsic_vfdiv_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { @@ -467,6 +488,7 @@ define 
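The floating-point vector-scalar (.vf) forms gain the same leading passthru operand. A minimal sketch, again with a hypothetical function name; a concrete i64 vl is used here so the example stands alone, whereas the tests above spell the vl type as the iXLen placeholder that their RUN lines substitute per target.

declare <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  half,
  i64);

define <vscale x 1 x half> @example_vfadd_vf(<vscale x 1 x half> %x, half %s, i64 %vl) nounwind {
entry:
  ; undef passthru => tail agnostic, matching the unmasked tests in this patch
  %r = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %x,
    half %s,
    i64 %vl)
  ret <vscale x 1 x half> %r
}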
@intrinsic_vfdiv_vv_nxv16f32_nxv16f32_nxv16f32( @llvm.riscv.vfdiv.nxv16f32.nxv16f32( + undef, %0, %1, iXLen %2) @@ -503,6 +525,7 @@ entry: declare @llvm.riscv.vfdiv.nxv1f64.nxv1f64( , , + , iXLen); define @intrinsic_vfdiv_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { @@ -513,6 +536,7 @@ define @intrinsic_vfdiv_vv_nxv1f64_nxv1f64_nxv1f64( @llvm.riscv.vfdiv.nxv1f64.nxv1f64( + undef, %0, %1, iXLen %2) @@ -548,6 +572,7 @@ entry: declare @llvm.riscv.vfdiv.nxv2f64.nxv2f64( , , + , iXLen); define @intrinsic_vfdiv_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { @@ -558,6 +583,7 @@ define @intrinsic_vfdiv_vv_nxv2f64_nxv2f64_nxv2f64( @llvm.riscv.vfdiv.nxv2f64.nxv2f64( + undef, %0, %1, iXLen %2) @@ -593,6 +619,7 @@ entry: declare @llvm.riscv.vfdiv.nxv4f64.nxv4f64( , , + , iXLen); define @intrinsic_vfdiv_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { @@ -603,6 +630,7 @@ define @intrinsic_vfdiv_vv_nxv4f64_nxv4f64_nxv4f64( @llvm.riscv.vfdiv.nxv4f64.nxv4f64( + undef, %0, %1, iXLen %2) @@ -638,6 +666,7 @@ entry: declare @llvm.riscv.vfdiv.nxv8f64.nxv8f64( , , + , iXLen); define @intrinsic_vfdiv_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { @@ -648,6 +677,7 @@ define @intrinsic_vfdiv_vv_nxv8f64_nxv8f64_nxv8f64( @llvm.riscv.vfdiv.nxv8f64.nxv8f64( + undef, %0, %1, iXLen %2) @@ -683,6 +713,7 @@ entry: declare @llvm.riscv.vfdiv.nxv1f16.f16( , + , half, iXLen); @@ -694,6 +725,7 @@ define @intrinsic_vfdiv_vf_nxv1f16_nxv1f16_f16( @llvm.riscv.vfdiv.nxv1f16.f16( + undef, %0, half %1, iXLen %2) @@ -728,6 +760,7 @@ entry: declare @llvm.riscv.vfdiv.nxv2f16.f16( , + , half, iXLen); @@ -739,6 +772,7 @@ define @intrinsic_vfdiv_vf_nxv2f16_nxv2f16_f16( @llvm.riscv.vfdiv.nxv2f16.f16( + undef, %0, half %1, iXLen %2) @@ -773,6 +807,7 @@ entry: declare @llvm.riscv.vfdiv.nxv4f16.f16( , + , half, iXLen); @@ -784,6 +819,7 @@ define @intrinsic_vfdiv_vf_nxv4f16_nxv4f16_f16( @llvm.riscv.vfdiv.nxv4f16.f16( + undef, %0, half %1, iXLen %2) @@ -818,6 +854,7 @@ entry: declare @llvm.riscv.vfdiv.nxv8f16.f16( , + , half, iXLen); @@ -829,6 +866,7 @@ define @intrinsic_vfdiv_vf_nxv8f16_nxv8f16_f16( @llvm.riscv.vfdiv.nxv8f16.f16( + undef, %0, half %1, iXLen %2) @@ -863,6 +901,7 @@ entry: declare @llvm.riscv.vfdiv.nxv16f16.f16( , + , half, iXLen); @@ -874,6 +913,7 @@ define @intrinsic_vfdiv_vf_nxv16f16_nxv16f16_f16( @llvm.riscv.vfdiv.nxv16f16.f16( + undef, %0, half %1, iXLen %2) @@ -908,6 +948,7 @@ entry: declare @llvm.riscv.vfdiv.nxv32f16.f16( , + , half, iXLen); @@ -919,6 +960,7 @@ define @intrinsic_vfdiv_vf_nxv32f16_nxv32f16_f16( @llvm.riscv.vfdiv.nxv32f16.f16( + undef, %0, half %1, iXLen %2) @@ -953,6 +995,7 @@ entry: declare @llvm.riscv.vfdiv.nxv1f32.f32( , + , float, iXLen); @@ -964,6 +1007,7 @@ define @intrinsic_vfdiv_vf_nxv1f32_nxv1f32_f32( @llvm.riscv.vfdiv.nxv1f32.f32( + undef, %0, float %1, iXLen %2) @@ -998,6 +1042,7 @@ entry: declare @llvm.riscv.vfdiv.nxv2f32.f32( , + , float, iXLen); @@ -1009,6 +1054,7 @@ define @intrinsic_vfdiv_vf_nxv2f32_nxv2f32_f32( @llvm.riscv.vfdiv.nxv2f32.f32( + undef, %0, float %1, iXLen %2) @@ -1043,6 +1089,7 @@ entry: declare @llvm.riscv.vfdiv.nxv4f32.f32( , + , float, iXLen); @@ -1054,6 +1101,7 @@ define @intrinsic_vfdiv_vf_nxv4f32_nxv4f32_f32( @llvm.riscv.vfdiv.nxv4f32.f32( + undef, %0, float %1, iXLen %2) @@ -1088,6 +1136,7 @@ entry: declare @llvm.riscv.vfdiv.nxv8f32.f32( , + , float, iXLen); @@ -1099,6 +1148,7 @@ define @intrinsic_vfdiv_vf_nxv8f32_nxv8f32_f32( @llvm.riscv.vfdiv.nxv8f32.f32( + undef, %0, float %1, iXLen %2) @@ -1133,6 +1183,7 @@ entry: declare 
@llvm.riscv.vfdiv.nxv16f32.f32( , + , float, iXLen); @@ -1144,6 +1195,7 @@ define @intrinsic_vfdiv_vf_nxv16f32_nxv16f32_f32( @llvm.riscv.vfdiv.nxv16f32.f32( + undef, %0, float %1, iXLen %2) @@ -1178,6 +1230,7 @@ entry: declare @llvm.riscv.vfdiv.nxv1f64.f64( , + , double, iXLen); @@ -1189,6 +1242,7 @@ define @intrinsic_vfdiv_vf_nxv1f64_nxv1f64_f64( @llvm.riscv.vfdiv.nxv1f64.f64( + undef, %0, double %1, iXLen %2) @@ -1223,6 +1277,7 @@ entry: declare @llvm.riscv.vfdiv.nxv2f64.f64( , + , double, iXLen); @@ -1234,6 +1289,7 @@ define @intrinsic_vfdiv_vf_nxv2f64_nxv2f64_f64( @llvm.riscv.vfdiv.nxv2f64.f64( + undef, %0, double %1, iXLen %2) @@ -1268,6 +1324,7 @@ entry: declare @llvm.riscv.vfdiv.nxv4f64.f64( , + , double, iXLen); @@ -1279,6 +1336,7 @@ define @intrinsic_vfdiv_vf_nxv4f64_nxv4f64_f64( @llvm.riscv.vfdiv.nxv4f64.f64( + undef, %0, double %1, iXLen %2) @@ -1313,6 +1371,7 @@ entry: declare @llvm.riscv.vfdiv.nxv8f64.f64( , + , double, iXLen); @@ -1324,6 +1383,7 @@ define @intrinsic_vfdiv_vf_nxv8f64_nxv8f64_f64( @llvm.riscv.vfdiv.nxv8f64.f64( + undef, %0, double %1, iXLen %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax.ll index 4469819..cb9c3cd 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmax.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmax.ll @@ -6,6 +6,7 @@ declare @llvm.riscv.vfmax.nxv1f16.nxv1f16( , , + , iXLen); define @intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { @@ -16,6 +17,7 @@ define @intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16( @llvm.riscv.vfmax.nxv1f16.nxv1f16( + undef, %0, %1, iXLen %2) @@ -51,6 +53,7 @@ entry: declare @llvm.riscv.vfmax.nxv2f16.nxv2f16( , , + , iXLen); define @intrinsic_vfmax_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { @@ -61,6 +64,7 @@ define @intrinsic_vfmax_vv_nxv2f16_nxv2f16_nxv2f16( @llvm.riscv.vfmax.nxv2f16.nxv2f16( + undef, %0, %1, iXLen %2) @@ -96,6 +100,7 @@ entry: declare @llvm.riscv.vfmax.nxv4f16.nxv4f16( , , + , iXLen); define @intrinsic_vfmax_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { @@ -106,6 +111,7 @@ define @intrinsic_vfmax_vv_nxv4f16_nxv4f16_nxv4f16( @llvm.riscv.vfmax.nxv4f16.nxv4f16( + undef, %0, %1, iXLen %2) @@ -141,6 +147,7 @@ entry: declare @llvm.riscv.vfmax.nxv8f16.nxv8f16( , , + , iXLen); define @intrinsic_vfmax_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { @@ -151,6 +158,7 @@ define @intrinsic_vfmax_vv_nxv8f16_nxv8f16_nxv8f16( @llvm.riscv.vfmax.nxv8f16.nxv8f16( + undef, %0, %1, iXLen %2) @@ -186,6 +194,7 @@ entry: declare @llvm.riscv.vfmax.nxv16f16.nxv16f16( , , + , iXLen); define @intrinsic_vfmax_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { @@ -196,6 +205,7 @@ define @intrinsic_vfmax_vv_nxv16f16_nxv16f16_nxv16f16( @llvm.riscv.vfmax.nxv16f16.nxv16f16( + undef, %0, %1, iXLen %2) @@ -231,6 +241,7 @@ entry: declare @llvm.riscv.vfmax.nxv32f16.nxv32f16( , , + , iXLen); define @intrinsic_vfmax_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { @@ -241,6 +252,7 @@ define @intrinsic_vfmax_vv_nxv32f16_nxv32f16_nxv32f16( @llvm.riscv.vfmax.nxv32f16.nxv32f16( + undef, %0, %1, iXLen %2) @@ -277,6 +289,7 @@ entry: declare @llvm.riscv.vfmax.nxv1f32.nxv1f32( , , + , iXLen); define @intrinsic_vfmax_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { @@ -287,6 +300,7 @@ define @intrinsic_vfmax_vv_nxv1f32_nxv1f32_nxv1f32( @llvm.riscv.vfmax.nxv1f32.nxv1f32( + undef, %0, %1, iXLen %2) @@ -322,6 +336,7 @@ entry: declare @llvm.riscv.vfmax.nxv2f32.nxv2f32( , , + , iXLen); define @intrinsic_vfmax_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, 
iXLen %2) nounwind { @@ -332,6 +347,7 @@ define @intrinsic_vfmax_vv_nxv2f32_nxv2f32_nxv2f32( @llvm.riscv.vfmax.nxv2f32.nxv2f32( + undef, %0, %1, iXLen %2) @@ -367,6 +383,7 @@ entry: declare @llvm.riscv.vfmax.nxv4f32.nxv4f32( , , + , iXLen); define @intrinsic_vfmax_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { @@ -377,6 +394,7 @@ define @intrinsic_vfmax_vv_nxv4f32_nxv4f32_nxv4f32( @llvm.riscv.vfmax.nxv4f32.nxv4f32( + undef, %0, %1, iXLen %2) @@ -412,6 +430,7 @@ entry: declare @llvm.riscv.vfmax.nxv8f32.nxv8f32( , , + , iXLen); define @intrinsic_vfmax_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { @@ -422,6 +441,7 @@ define @intrinsic_vfmax_vv_nxv8f32_nxv8f32_nxv8f32( @llvm.riscv.vfmax.nxv8f32.nxv8f32( + undef, %0, %1, iXLen %2) @@ -457,6 +477,7 @@ entry: declare @llvm.riscv.vfmax.nxv16f32.nxv16f32( , , + , iXLen); define @intrinsic_vfmax_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { @@ -467,6 +488,7 @@ define @intrinsic_vfmax_vv_nxv16f32_nxv16f32_nxv16f32( @llvm.riscv.vfmax.nxv16f32.nxv16f32( + undef, %0, %1, iXLen %2) @@ -503,6 +525,7 @@ entry: declare @llvm.riscv.vfmax.nxv1f64.nxv1f64( , , + , iXLen); define @intrinsic_vfmax_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { @@ -513,6 +536,7 @@ define @intrinsic_vfmax_vv_nxv1f64_nxv1f64_nxv1f64( @llvm.riscv.vfmax.nxv1f64.nxv1f64( + undef, %0, %1, iXLen %2) @@ -548,6 +572,7 @@ entry: declare @llvm.riscv.vfmax.nxv2f64.nxv2f64( , , + , iXLen); define @intrinsic_vfmax_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { @@ -558,6 +583,7 @@ define @intrinsic_vfmax_vv_nxv2f64_nxv2f64_nxv2f64( @llvm.riscv.vfmax.nxv2f64.nxv2f64( + undef, %0, %1, iXLen %2) @@ -593,6 +619,7 @@ entry: declare @llvm.riscv.vfmax.nxv4f64.nxv4f64( , , + , iXLen); define @intrinsic_vfmax_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { @@ -603,6 +630,7 @@ define @intrinsic_vfmax_vv_nxv4f64_nxv4f64_nxv4f64( @llvm.riscv.vfmax.nxv4f64.nxv4f64( + undef, %0, %1, iXLen %2) @@ -638,6 +666,7 @@ entry: declare @llvm.riscv.vfmax.nxv8f64.nxv8f64( , , + , iXLen); define @intrinsic_vfmax_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { @@ -648,6 +677,7 @@ define @intrinsic_vfmax_vv_nxv8f64_nxv8f64_nxv8f64( @llvm.riscv.vfmax.nxv8f64.nxv8f64( + undef, %0, %1, iXLen %2) @@ -683,6 +713,7 @@ entry: declare @llvm.riscv.vfmax.nxv1f16.f16( , + , half, iXLen); @@ -694,6 +725,7 @@ define @intrinsic_vfmax_vf_nxv1f16_nxv1f16_f16( @llvm.riscv.vfmax.nxv1f16.f16( + undef, %0, half %1, iXLen %2) @@ -728,6 +760,7 @@ entry: declare @llvm.riscv.vfmax.nxv2f16.f16( , + , half, iXLen); @@ -739,6 +772,7 @@ define @intrinsic_vfmax_vf_nxv2f16_nxv2f16_f16( @llvm.riscv.vfmax.nxv2f16.f16( + undef, %0, half %1, iXLen %2) @@ -773,6 +807,7 @@ entry: declare @llvm.riscv.vfmax.nxv4f16.f16( , + , half, iXLen); @@ -784,6 +819,7 @@ define @intrinsic_vfmax_vf_nxv4f16_nxv4f16_f16( @llvm.riscv.vfmax.nxv4f16.f16( + undef, %0, half %1, iXLen %2) @@ -818,6 +854,7 @@ entry: declare @llvm.riscv.vfmax.nxv8f16.f16( , + , half, iXLen); @@ -829,6 +866,7 @@ define @intrinsic_vfmax_vf_nxv8f16_nxv8f16_f16( @llvm.riscv.vfmax.nxv8f16.f16( + undef, %0, half %1, iXLen %2) @@ -863,6 +901,7 @@ entry: declare @llvm.riscv.vfmax.nxv16f16.f16( , + , half, iXLen); @@ -874,6 +913,7 @@ define @intrinsic_vfmax_vf_nxv16f16_nxv16f16_f16( @llvm.riscv.vfmax.nxv16f16.f16( + undef, %0, half %1, iXLen %2) @@ -908,6 +948,7 @@ entry: declare @llvm.riscv.vfmax.nxv32f16.f16( , + , half, iXLen); @@ -919,6 +960,7 @@ define @intrinsic_vfmax_vf_nxv32f16_nxv32f16_f16( @llvm.riscv.vfmax.nxv32f16.f16( + 
undef, %0, half %1, iXLen %2) @@ -953,6 +995,7 @@ entry: declare @llvm.riscv.vfmax.nxv1f32.f32( , + , float, iXLen); @@ -964,6 +1007,7 @@ define @intrinsic_vfmax_vf_nxv1f32_nxv1f32_f32( @llvm.riscv.vfmax.nxv1f32.f32( + undef, %0, float %1, iXLen %2) @@ -998,6 +1042,7 @@ entry: declare @llvm.riscv.vfmax.nxv2f32.f32( , + , float, iXLen); @@ -1009,6 +1054,7 @@ define @intrinsic_vfmax_vf_nxv2f32_nxv2f32_f32( @llvm.riscv.vfmax.nxv2f32.f32( + undef, %0, float %1, iXLen %2) @@ -1043,6 +1089,7 @@ entry: declare @llvm.riscv.vfmax.nxv4f32.f32( , + , float, iXLen); @@ -1054,6 +1101,7 @@ define @intrinsic_vfmax_vf_nxv4f32_nxv4f32_f32( @llvm.riscv.vfmax.nxv4f32.f32( + undef, %0, float %1, iXLen %2) @@ -1088,6 +1136,7 @@ entry: declare @llvm.riscv.vfmax.nxv8f32.f32( , + , float, iXLen); @@ -1099,6 +1148,7 @@ define @intrinsic_vfmax_vf_nxv8f32_nxv8f32_f32( @llvm.riscv.vfmax.nxv8f32.f32( + undef, %0, float %1, iXLen %2) @@ -1133,6 +1183,7 @@ entry: declare @llvm.riscv.vfmax.nxv16f32.f32( , + , float, iXLen); @@ -1144,6 +1195,7 @@ define @intrinsic_vfmax_vf_nxv16f32_nxv16f32_f32( @llvm.riscv.vfmax.nxv16f32.f32( + undef, %0, float %1, iXLen %2) @@ -1178,6 +1230,7 @@ entry: declare @llvm.riscv.vfmax.nxv1f64.f64( , + , double, iXLen); @@ -1189,6 +1242,7 @@ define @intrinsic_vfmax_vf_nxv1f64_nxv1f64_f64( @llvm.riscv.vfmax.nxv1f64.f64( + undef, %0, double %1, iXLen %2) @@ -1223,6 +1277,7 @@ entry: declare @llvm.riscv.vfmax.nxv2f64.f64( , + , double, iXLen); @@ -1234,6 +1289,7 @@ define @intrinsic_vfmax_vf_nxv2f64_nxv2f64_f64( @llvm.riscv.vfmax.nxv2f64.f64( + undef, %0, double %1, iXLen %2) @@ -1268,6 +1324,7 @@ entry: declare @llvm.riscv.vfmax.nxv4f64.f64( , + , double, iXLen); @@ -1279,6 +1336,7 @@ define @intrinsic_vfmax_vf_nxv4f64_nxv4f64_f64( @llvm.riscv.vfmax.nxv4f64.f64( + undef, %0, double %1, iXLen %2) @@ -1313,6 +1371,7 @@ entry: declare @llvm.riscv.vfmax.nxv8f64.f64( , + , double, iXLen); @@ -1324,6 +1383,7 @@ define @intrinsic_vfmax_vf_nxv8f64_nxv8f64_f64( @llvm.riscv.vfmax.nxv8f64.f64( + undef, %0, double %1, iXLen %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin.ll index e151e9f..4d6da26 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmin.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmin.ll @@ -6,6 +6,7 @@ declare @llvm.riscv.vfmin.nxv1f16.nxv1f16( , , + , iXLen); define @intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { @@ -16,6 +17,7 @@ define @intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16( @llvm.riscv.vfmin.nxv1f16.nxv1f16( + undef, %0, %1, iXLen %2) @@ -51,6 +53,7 @@ entry: declare @llvm.riscv.vfmin.nxv2f16.nxv2f16( , , + , iXLen); define @intrinsic_vfmin_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { @@ -61,6 +64,7 @@ define @intrinsic_vfmin_vv_nxv2f16_nxv2f16_nxv2f16( @llvm.riscv.vfmin.nxv2f16.nxv2f16( + undef, %0, %1, iXLen %2) @@ -96,6 +100,7 @@ entry: declare @llvm.riscv.vfmin.nxv4f16.nxv4f16( , , + , iXLen); define @intrinsic_vfmin_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { @@ -106,6 +111,7 @@ define @intrinsic_vfmin_vv_nxv4f16_nxv4f16_nxv4f16( @llvm.riscv.vfmin.nxv4f16.nxv4f16( + undef, %0, %1, iXLen %2) @@ -141,6 +147,7 @@ entry: declare @llvm.riscv.vfmin.nxv8f16.nxv8f16( , , + , iXLen); define @intrinsic_vfmin_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { @@ -151,6 +158,7 @@ define @intrinsic_vfmin_vv_nxv8f16_nxv8f16_nxv8f16( @llvm.riscv.vfmin.nxv8f16.nxv8f16( + undef, %0, %1, iXLen %2) @@ -186,6 +194,7 @@ entry: declare @llvm.riscv.vfmin.nxv16f16.nxv16f16( , , + , iXLen); define 
@intrinsic_vfmin_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { @@ -196,6 +205,7 @@ define @intrinsic_vfmin_vv_nxv16f16_nxv16f16_nxv16f16( @llvm.riscv.vfmin.nxv16f16.nxv16f16( + undef, %0, %1, iXLen %2) @@ -231,6 +241,7 @@ entry: declare @llvm.riscv.vfmin.nxv32f16.nxv32f16( , , + , iXLen); define @intrinsic_vfmin_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { @@ -241,6 +252,7 @@ define @intrinsic_vfmin_vv_nxv32f16_nxv32f16_nxv32f16( @llvm.riscv.vfmin.nxv32f16.nxv32f16( + undef, %0, %1, iXLen %2) @@ -277,6 +289,7 @@ entry: declare @llvm.riscv.vfmin.nxv1f32.nxv1f32( , , + , iXLen); define @intrinsic_vfmin_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { @@ -287,6 +300,7 @@ define @intrinsic_vfmin_vv_nxv1f32_nxv1f32_nxv1f32( @llvm.riscv.vfmin.nxv1f32.nxv1f32( + undef, %0, %1, iXLen %2) @@ -322,6 +336,7 @@ entry: declare @llvm.riscv.vfmin.nxv2f32.nxv2f32( , , + , iXLen); define @intrinsic_vfmin_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { @@ -332,6 +347,7 @@ define @intrinsic_vfmin_vv_nxv2f32_nxv2f32_nxv2f32( @llvm.riscv.vfmin.nxv2f32.nxv2f32( + undef, %0, %1, iXLen %2) @@ -367,6 +383,7 @@ entry: declare @llvm.riscv.vfmin.nxv4f32.nxv4f32( , , + , iXLen); define @intrinsic_vfmin_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { @@ -377,6 +394,7 @@ define @intrinsic_vfmin_vv_nxv4f32_nxv4f32_nxv4f32( @llvm.riscv.vfmin.nxv4f32.nxv4f32( + undef, %0, %1, iXLen %2) @@ -412,6 +430,7 @@ entry: declare @llvm.riscv.vfmin.nxv8f32.nxv8f32( , , + , iXLen); define @intrinsic_vfmin_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { @@ -422,6 +441,7 @@ define @intrinsic_vfmin_vv_nxv8f32_nxv8f32_nxv8f32( @llvm.riscv.vfmin.nxv8f32.nxv8f32( + undef, %0, %1, iXLen %2) @@ -457,6 +477,7 @@ entry: declare @llvm.riscv.vfmin.nxv16f32.nxv16f32( , , + , iXLen); define @intrinsic_vfmin_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { @@ -467,6 +488,7 @@ define @intrinsic_vfmin_vv_nxv16f32_nxv16f32_nxv16f32( @llvm.riscv.vfmin.nxv16f32.nxv16f32( + undef, %0, %1, iXLen %2) @@ -503,6 +525,7 @@ entry: declare @llvm.riscv.vfmin.nxv1f64.nxv1f64( , , + , iXLen); define @intrinsic_vfmin_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { @@ -513,6 +536,7 @@ define @intrinsic_vfmin_vv_nxv1f64_nxv1f64_nxv1f64( @llvm.riscv.vfmin.nxv1f64.nxv1f64( + undef, %0, %1, iXLen %2) @@ -548,6 +572,7 @@ entry: declare @llvm.riscv.vfmin.nxv2f64.nxv2f64( , , + , iXLen); define @intrinsic_vfmin_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { @@ -558,6 +583,7 @@ define @intrinsic_vfmin_vv_nxv2f64_nxv2f64_nxv2f64( @llvm.riscv.vfmin.nxv2f64.nxv2f64( + undef, %0, %1, iXLen %2) @@ -593,6 +619,7 @@ entry: declare @llvm.riscv.vfmin.nxv4f64.nxv4f64( , , + , iXLen); define @intrinsic_vfmin_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { @@ -603,6 +630,7 @@ define @intrinsic_vfmin_vv_nxv4f64_nxv4f64_nxv4f64( @llvm.riscv.vfmin.nxv4f64.nxv4f64( + undef, %0, %1, iXLen %2) @@ -638,6 +666,7 @@ entry: declare @llvm.riscv.vfmin.nxv8f64.nxv8f64( , , + , iXLen); define @intrinsic_vfmin_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { @@ -648,6 +677,7 @@ define @intrinsic_vfmin_vv_nxv8f64_nxv8f64_nxv8f64( @llvm.riscv.vfmin.nxv8f64.nxv8f64( + undef, %0, %1, iXLen %2) @@ -683,6 +713,7 @@ entry: declare @llvm.riscv.vfmin.nxv1f16.f16( , + , half, iXLen); @@ -694,6 +725,7 @@ define @intrinsic_vfmin_vf_nxv1f16_nxv1f16_f16( @llvm.riscv.vfmin.nxv1f16.f16( + undef, %0, half %1, iXLen %2) @@ -728,6 +760,7 @@ entry: declare @llvm.riscv.vfmin.nxv2f16.f16( , + , half, iXLen); @@ 
-739,6 +772,7 @@ define @intrinsic_vfmin_vf_nxv2f16_nxv2f16_f16( @llvm.riscv.vfmin.nxv2f16.f16( + undef, %0, half %1, iXLen %2) @@ -773,6 +807,7 @@ entry: declare @llvm.riscv.vfmin.nxv4f16.f16( , + , half, iXLen); @@ -784,6 +819,7 @@ define @intrinsic_vfmin_vf_nxv4f16_nxv4f16_f16( @llvm.riscv.vfmin.nxv4f16.f16( + undef, %0, half %1, iXLen %2) @@ -818,6 +854,7 @@ entry: declare @llvm.riscv.vfmin.nxv8f16.f16( , + , half, iXLen); @@ -829,6 +866,7 @@ define @intrinsic_vfmin_vf_nxv8f16_nxv8f16_f16( @llvm.riscv.vfmin.nxv8f16.f16( + undef, %0, half %1, iXLen %2) @@ -863,6 +901,7 @@ entry: declare @llvm.riscv.vfmin.nxv16f16.f16( , + , half, iXLen); @@ -874,6 +913,7 @@ define @intrinsic_vfmin_vf_nxv16f16_nxv16f16_f16( @llvm.riscv.vfmin.nxv16f16.f16( + undef, %0, half %1, iXLen %2) @@ -908,6 +948,7 @@ entry: declare @llvm.riscv.vfmin.nxv32f16.f16( , + , half, iXLen); @@ -919,6 +960,7 @@ define @intrinsic_vfmin_vf_nxv32f16_nxv32f16_f16( @llvm.riscv.vfmin.nxv32f16.f16( + undef, %0, half %1, iXLen %2) @@ -953,6 +995,7 @@ entry: declare @llvm.riscv.vfmin.nxv1f32.f32( , + , float, iXLen); @@ -964,6 +1007,7 @@ define @intrinsic_vfmin_vf_nxv1f32_nxv1f32_f32( @llvm.riscv.vfmin.nxv1f32.f32( + undef, %0, float %1, iXLen %2) @@ -998,6 +1042,7 @@ entry: declare @llvm.riscv.vfmin.nxv2f32.f32( , + , float, iXLen); @@ -1009,6 +1054,7 @@ define @intrinsic_vfmin_vf_nxv2f32_nxv2f32_f32( @llvm.riscv.vfmin.nxv2f32.f32( + undef, %0, float %1, iXLen %2) @@ -1043,6 +1089,7 @@ entry: declare @llvm.riscv.vfmin.nxv4f32.f32( , + , float, iXLen); @@ -1054,6 +1101,7 @@ define @intrinsic_vfmin_vf_nxv4f32_nxv4f32_f32( @llvm.riscv.vfmin.nxv4f32.f32( + undef, %0, float %1, iXLen %2) @@ -1088,6 +1136,7 @@ entry: declare @llvm.riscv.vfmin.nxv8f32.f32( , + , float, iXLen); @@ -1099,6 +1148,7 @@ define @intrinsic_vfmin_vf_nxv8f32_nxv8f32_f32( @llvm.riscv.vfmin.nxv8f32.f32( + undef, %0, float %1, iXLen %2) @@ -1133,6 +1183,7 @@ entry: declare @llvm.riscv.vfmin.nxv16f32.f32( , + , float, iXLen); @@ -1144,6 +1195,7 @@ define @intrinsic_vfmin_vf_nxv16f32_nxv16f32_f32( @llvm.riscv.vfmin.nxv16f32.f32( + undef, %0, float %1, iXLen %2) @@ -1178,6 +1230,7 @@ entry: declare @llvm.riscv.vfmin.nxv1f64.f64( , + , double, iXLen); @@ -1189,6 +1242,7 @@ define @intrinsic_vfmin_vf_nxv1f64_nxv1f64_f64( @llvm.riscv.vfmin.nxv1f64.f64( + undef, %0, double %1, iXLen %2) @@ -1223,6 +1277,7 @@ entry: declare @llvm.riscv.vfmin.nxv2f64.f64( , + , double, iXLen); @@ -1234,6 +1289,7 @@ define @intrinsic_vfmin_vf_nxv2f64_nxv2f64_f64( @llvm.riscv.vfmin.nxv2f64.f64( + undef, %0, double %1, iXLen %2) @@ -1268,6 +1324,7 @@ entry: declare @llvm.riscv.vfmin.nxv4f64.f64( , + , double, iXLen); @@ -1279,6 +1336,7 @@ define @intrinsic_vfmin_vf_nxv4f64_nxv4f64_f64( @llvm.riscv.vfmin.nxv4f64.f64( + undef, %0, double %1, iXLen %2) @@ -1313,6 +1371,7 @@ entry: declare @llvm.riscv.vfmin.nxv8f64.f64( , + , double, iXLen); @@ -1324,6 +1383,7 @@ define @intrinsic_vfmin_vf_nxv8f64_nxv8f64_f64( @llvm.riscv.vfmin.nxv8f64.f64( + undef, %0, double %1, iXLen %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul.ll index 0f4c738..2301057 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmul.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmul.ll @@ -6,6 +6,7 @@ declare @llvm.riscv.vfmul.nxv1f16.nxv1f16( , , + , iXLen); define @intrinsic_vfmul_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { @@ -16,6 +17,7 @@ define @intrinsic_vfmul_vv_nxv1f16_nxv1f16_nxv1f16( @llvm.riscv.vfmul.nxv1f16.nxv1f16( + undef, %0, %1, iXLen %2) @@ -51,6 +53,7 @@ entry: declare 
@llvm.riscv.vfmul.nxv2f16.nxv2f16( , , + , iXLen); define @intrinsic_vfmul_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { @@ -61,6 +64,7 @@ define @intrinsic_vfmul_vv_nxv2f16_nxv2f16_nxv2f16( @llvm.riscv.vfmul.nxv2f16.nxv2f16( + undef, %0, %1, iXLen %2) @@ -96,6 +100,7 @@ entry: declare @llvm.riscv.vfmul.nxv4f16.nxv4f16( , , + , iXLen); define @intrinsic_vfmul_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { @@ -106,6 +111,7 @@ define @intrinsic_vfmul_vv_nxv4f16_nxv4f16_nxv4f16( @llvm.riscv.vfmul.nxv4f16.nxv4f16( + undef, %0, %1, iXLen %2) @@ -141,6 +147,7 @@ entry: declare @llvm.riscv.vfmul.nxv8f16.nxv8f16( , , + , iXLen); define @intrinsic_vfmul_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { @@ -151,6 +158,7 @@ define @intrinsic_vfmul_vv_nxv8f16_nxv8f16_nxv8f16( @llvm.riscv.vfmul.nxv8f16.nxv8f16( + undef, %0, %1, iXLen %2) @@ -186,6 +194,7 @@ entry: declare @llvm.riscv.vfmul.nxv16f16.nxv16f16( , , + , iXLen); define @intrinsic_vfmul_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { @@ -196,6 +205,7 @@ define @intrinsic_vfmul_vv_nxv16f16_nxv16f16_nxv16f16( @llvm.riscv.vfmul.nxv16f16.nxv16f16( + undef, %0, %1, iXLen %2) @@ -231,6 +241,7 @@ entry: declare @llvm.riscv.vfmul.nxv32f16.nxv32f16( , , + , iXLen); define @intrinsic_vfmul_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { @@ -241,6 +252,7 @@ define @intrinsic_vfmul_vv_nxv32f16_nxv32f16_nxv32f16( @llvm.riscv.vfmul.nxv32f16.nxv32f16( + undef, %0, %1, iXLen %2) @@ -277,6 +289,7 @@ entry: declare @llvm.riscv.vfmul.nxv1f32.nxv1f32( , , + , iXLen); define @intrinsic_vfmul_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { @@ -287,6 +300,7 @@ define @intrinsic_vfmul_vv_nxv1f32_nxv1f32_nxv1f32( @llvm.riscv.vfmul.nxv1f32.nxv1f32( + undef, %0, %1, iXLen %2) @@ -322,6 +336,7 @@ entry: declare @llvm.riscv.vfmul.nxv2f32.nxv2f32( , , + , iXLen); define @intrinsic_vfmul_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { @@ -332,6 +347,7 @@ define @intrinsic_vfmul_vv_nxv2f32_nxv2f32_nxv2f32( @llvm.riscv.vfmul.nxv2f32.nxv2f32( + undef, %0, %1, iXLen %2) @@ -367,6 +383,7 @@ entry: declare @llvm.riscv.vfmul.nxv4f32.nxv4f32( , , + , iXLen); define @intrinsic_vfmul_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { @@ -377,6 +394,7 @@ define @intrinsic_vfmul_vv_nxv4f32_nxv4f32_nxv4f32( @llvm.riscv.vfmul.nxv4f32.nxv4f32( + undef, %0, %1, iXLen %2) @@ -412,6 +430,7 @@ entry: declare @llvm.riscv.vfmul.nxv8f32.nxv8f32( , , + , iXLen); define @intrinsic_vfmul_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { @@ -422,6 +441,7 @@ define @intrinsic_vfmul_vv_nxv8f32_nxv8f32_nxv8f32( @llvm.riscv.vfmul.nxv8f32.nxv8f32( + undef, %0, %1, iXLen %2) @@ -457,6 +477,7 @@ entry: declare @llvm.riscv.vfmul.nxv16f32.nxv16f32( , , + , iXLen); define @intrinsic_vfmul_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { @@ -467,6 +488,7 @@ define @intrinsic_vfmul_vv_nxv16f32_nxv16f32_nxv16f32( @llvm.riscv.vfmul.nxv16f32.nxv16f32( + undef, %0, %1, iXLen %2) @@ -503,6 +525,7 @@ entry: declare @llvm.riscv.vfmul.nxv1f64.nxv1f64( , , + , iXLen); define @intrinsic_vfmul_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { @@ -513,6 +536,7 @@ define @intrinsic_vfmul_vv_nxv1f64_nxv1f64_nxv1f64( @llvm.riscv.vfmul.nxv1f64.nxv1f64( + undef, %0, %1, iXLen %2) @@ -548,6 +572,7 @@ entry: declare @llvm.riscv.vfmul.nxv2f64.nxv2f64( , , + , iXLen); define @intrinsic_vfmul_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { @@ -558,6 +583,7 @@ define @intrinsic_vfmul_vv_nxv2f64_nxv2f64_nxv2f64( 
@llvm.riscv.vfmul.nxv2f64.nxv2f64( + undef, %0, %1, iXLen %2) @@ -593,6 +619,7 @@ entry: declare @llvm.riscv.vfmul.nxv4f64.nxv4f64( , , + , iXLen); define @intrinsic_vfmul_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { @@ -603,6 +630,7 @@ define @intrinsic_vfmul_vv_nxv4f64_nxv4f64_nxv4f64( @llvm.riscv.vfmul.nxv4f64.nxv4f64( + undef, %0, %1, iXLen %2) @@ -638,6 +666,7 @@ entry: declare @llvm.riscv.vfmul.nxv8f64.nxv8f64( , , + , iXLen); define @intrinsic_vfmul_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { @@ -648,6 +677,7 @@ define @intrinsic_vfmul_vv_nxv8f64_nxv8f64_nxv8f64( @llvm.riscv.vfmul.nxv8f64.nxv8f64( + undef, %0, %1, iXLen %2) @@ -683,6 +713,7 @@ entry: declare @llvm.riscv.vfmul.nxv1f16.f16( , + , half, iXLen); @@ -694,6 +725,7 @@ define @intrinsic_vfmul_vf_nxv1f16_nxv1f16_f16( @llvm.riscv.vfmul.nxv1f16.f16( + undef, %0, half %1, iXLen %2) @@ -728,6 +760,7 @@ entry: declare @llvm.riscv.vfmul.nxv2f16.f16( , + , half, iXLen); @@ -739,6 +772,7 @@ define @intrinsic_vfmul_vf_nxv2f16_nxv2f16_f16( @llvm.riscv.vfmul.nxv2f16.f16( + undef, %0, half %1, iXLen %2) @@ -773,6 +807,7 @@ entry: declare @llvm.riscv.vfmul.nxv4f16.f16( , + , half, iXLen); @@ -784,6 +819,7 @@ define @intrinsic_vfmul_vf_nxv4f16_nxv4f16_f16( @llvm.riscv.vfmul.nxv4f16.f16( + undef, %0, half %1, iXLen %2) @@ -818,6 +854,7 @@ entry: declare @llvm.riscv.vfmul.nxv8f16.f16( , + , half, iXLen); @@ -829,6 +866,7 @@ define @intrinsic_vfmul_vf_nxv8f16_nxv8f16_f16( @llvm.riscv.vfmul.nxv8f16.f16( + undef, %0, half %1, iXLen %2) @@ -863,6 +901,7 @@ entry: declare @llvm.riscv.vfmul.nxv16f16.f16( , + , half, iXLen); @@ -874,6 +913,7 @@ define @intrinsic_vfmul_vf_nxv16f16_nxv16f16_f16( @llvm.riscv.vfmul.nxv16f16.f16( + undef, %0, half %1, iXLen %2) @@ -908,6 +948,7 @@ entry: declare @llvm.riscv.vfmul.nxv32f16.f16( , + , half, iXLen); @@ -919,6 +960,7 @@ define @intrinsic_vfmul_vf_nxv32f16_nxv32f16_f16( @llvm.riscv.vfmul.nxv32f16.f16( + undef, %0, half %1, iXLen %2) @@ -953,6 +995,7 @@ entry: declare @llvm.riscv.vfmul.nxv1f32.f32( , + , float, iXLen); @@ -964,6 +1007,7 @@ define @intrinsic_vfmul_vf_nxv1f32_nxv1f32_f32( @llvm.riscv.vfmul.nxv1f32.f32( + undef, %0, float %1, iXLen %2) @@ -998,6 +1042,7 @@ entry: declare @llvm.riscv.vfmul.nxv2f32.f32( , + , float, iXLen); @@ -1009,6 +1054,7 @@ define @intrinsic_vfmul_vf_nxv2f32_nxv2f32_f32( @llvm.riscv.vfmul.nxv2f32.f32( + undef, %0, float %1, iXLen %2) @@ -1043,6 +1089,7 @@ entry: declare @llvm.riscv.vfmul.nxv4f32.f32( , + , float, iXLen); @@ -1054,6 +1101,7 @@ define @intrinsic_vfmul_vf_nxv4f32_nxv4f32_f32( @llvm.riscv.vfmul.nxv4f32.f32( + undef, %0, float %1, iXLen %2) @@ -1088,6 +1136,7 @@ entry: declare @llvm.riscv.vfmul.nxv8f32.f32( , + , float, iXLen); @@ -1099,6 +1148,7 @@ define @intrinsic_vfmul_vf_nxv8f32_nxv8f32_f32( @llvm.riscv.vfmul.nxv8f32.f32( + undef, %0, float %1, iXLen %2) @@ -1133,6 +1183,7 @@ entry: declare @llvm.riscv.vfmul.nxv16f32.f32( , + , float, iXLen); @@ -1144,6 +1195,7 @@ define @intrinsic_vfmul_vf_nxv16f32_nxv16f32_f32( @llvm.riscv.vfmul.nxv16f32.f32( + undef, %0, float %1, iXLen %2) @@ -1178,6 +1230,7 @@ entry: declare @llvm.riscv.vfmul.nxv1f64.f64( , + , double, iXLen); @@ -1189,6 +1242,7 @@ define @intrinsic_vfmul_vf_nxv1f64_nxv1f64_f64( @llvm.riscv.vfmul.nxv1f64.f64( + undef, %0, double %1, iXLen %2) @@ -1223,6 +1277,7 @@ entry: declare @llvm.riscv.vfmul.nxv2f64.f64( , + , double, iXLen); @@ -1234,6 +1289,7 @@ define @intrinsic_vfmul_vf_nxv2f64_nxv2f64_f64( @llvm.riscv.vfmul.nxv2f64.f64( + undef, %0, double %1, iXLen %2) @@ -1268,6 
+1324,7 @@ entry: declare @llvm.riscv.vfmul.nxv4f64.f64( , + , double, iXLen); @@ -1279,6 +1336,7 @@ define @intrinsic_vfmul_vf_nxv4f64_nxv4f64_f64( @llvm.riscv.vfmul.nxv4f64.f64( + undef, %0, double %1, iXLen %2) @@ -1313,6 +1371,7 @@ entry: declare @llvm.riscv.vfmul.nxv8f64.f64( , + , double, iXLen); @@ -1324,6 +1383,7 @@ define @intrinsic_vfmul_vf_nxv8f64_nxv8f64_f64( @llvm.riscv.vfmul.nxv8f64.f64( + undef, %0, double %1, iXLen %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrdiv.ll b/llvm/test/CodeGen/RISCV/rvv/vfrdiv.ll index 58dc39b9..a872aad 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfrdiv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrdiv.ll @@ -5,6 +5,7 @@ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfrdiv.nxv1f16.f16( , + , half, iXLen); @@ -16,6 +17,7 @@ define @intrinsic_vfrdiv_vf_nxv1f16_nxv1f16_f16( @llvm.riscv.vfrdiv.nxv1f16.f16( + undef, %0, half %1, iXLen %2) @@ -50,6 +52,7 @@ entry: declare @llvm.riscv.vfrdiv.nxv2f16.f16( , + , half, iXLen); @@ -61,6 +64,7 @@ define @intrinsic_vfrdiv_vf_nxv2f16_nxv2f16_f16( @llvm.riscv.vfrdiv.nxv2f16.f16( + undef, %0, half %1, iXLen %2) @@ -95,6 +99,7 @@ entry: declare @llvm.riscv.vfrdiv.nxv4f16.f16( , + , half, iXLen); @@ -106,6 +111,7 @@ define @intrinsic_vfrdiv_vf_nxv4f16_nxv4f16_f16( @llvm.riscv.vfrdiv.nxv4f16.f16( + undef, %0, half %1, iXLen %2) @@ -140,6 +146,7 @@ entry: declare @llvm.riscv.vfrdiv.nxv8f16.f16( , + , half, iXLen); @@ -151,6 +158,7 @@ define @intrinsic_vfrdiv_vf_nxv8f16_nxv8f16_f16( @llvm.riscv.vfrdiv.nxv8f16.f16( + undef, %0, half %1, iXLen %2) @@ -185,6 +193,7 @@ entry: declare @llvm.riscv.vfrdiv.nxv16f16.f16( , + , half, iXLen); @@ -196,6 +205,7 @@ define @intrinsic_vfrdiv_vf_nxv16f16_nxv16f16_f16( @llvm.riscv.vfrdiv.nxv16f16.f16( + undef, %0, half %1, iXLen %2) @@ -230,6 +240,7 @@ entry: declare @llvm.riscv.vfrdiv.nxv32f16.f16( , + , half, iXLen); @@ -241,6 +252,7 @@ define @intrinsic_vfrdiv_vf_nxv32f16_nxv32f16_f16( @llvm.riscv.vfrdiv.nxv32f16.f16( + undef, %0, half %1, iXLen %2) @@ -275,6 +287,7 @@ entry: declare @llvm.riscv.vfrdiv.nxv1f32.f32( , + , float, iXLen); @@ -286,6 +299,7 @@ define @intrinsic_vfrdiv_vf_nxv1f32_nxv1f32_f32( @llvm.riscv.vfrdiv.nxv1f32.f32( + undef, %0, float %1, iXLen %2) @@ -320,6 +334,7 @@ entry: declare @llvm.riscv.vfrdiv.nxv2f32.f32( , + , float, iXLen); @@ -331,6 +346,7 @@ define @intrinsic_vfrdiv_vf_nxv2f32_nxv2f32_f32( @llvm.riscv.vfrdiv.nxv2f32.f32( + undef, %0, float %1, iXLen %2) @@ -365,6 +381,7 @@ entry: declare @llvm.riscv.vfrdiv.nxv4f32.f32( , + , float, iXLen); @@ -376,6 +393,7 @@ define @intrinsic_vfrdiv_vf_nxv4f32_nxv4f32_f32( @llvm.riscv.vfrdiv.nxv4f32.f32( + undef, %0, float %1, iXLen %2) @@ -410,6 +428,7 @@ entry: declare @llvm.riscv.vfrdiv.nxv8f32.f32( , + , float, iXLen); @@ -421,6 +440,7 @@ define @intrinsic_vfrdiv_vf_nxv8f32_nxv8f32_f32( @llvm.riscv.vfrdiv.nxv8f32.f32( + undef, %0, float %1, iXLen %2) @@ -455,6 +475,7 @@ entry: declare @llvm.riscv.vfrdiv.nxv16f32.f32( , + , float, iXLen); @@ -466,6 +487,7 @@ define @intrinsic_vfrdiv_vf_nxv16f32_nxv16f32_f32( @llvm.riscv.vfrdiv.nxv16f32.f32( + undef, %0, float %1, iXLen %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vfrdiv.nxv1f64.f64( , + , double, iXLen); @@ -511,6 +534,7 @@ define @intrinsic_vfrdiv_vf_nxv1f64_nxv1f64_f64( @llvm.riscv.vfrdiv.nxv1f64.f64( + undef, %0, double %1, iXLen %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vfrdiv.nxv2f64.f64( , + , double, iXLen); @@ -556,6 +581,7 @@ define @intrinsic_vfrdiv_vf_nxv2f64_nxv2f64_f64( @llvm.riscv.vfrdiv.nxv2f64.f64( + 
undef, %0, double %1, iXLen %2) @@ -590,6 +616,7 @@ entry: declare @llvm.riscv.vfrdiv.nxv4f64.f64( , + , double, iXLen); @@ -601,6 +628,7 @@ define @intrinsic_vfrdiv_vf_nxv4f64_nxv4f64_f64( @llvm.riscv.vfrdiv.nxv4f64.f64( + undef, %0, double %1, iXLen %2) @@ -635,6 +663,7 @@ entry: declare @llvm.riscv.vfrdiv.nxv8f64.f64( , + , double, iXLen); @@ -646,6 +675,7 @@ define @intrinsic_vfrdiv_vf_nxv8f64_nxv8f64_f64( @llvm.riscv.vfrdiv.nxv8f64.f64( + undef, %0, double %1, iXLen %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsub.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsub.ll index 3fb2815..d1cabf1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfrsub.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrsub.ll @@ -5,6 +5,7 @@ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfrsub.nxv1f16.f16( , + , half, iXLen); @@ -16,6 +17,7 @@ define @intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16( @llvm.riscv.vfrsub.nxv1f16.f16( + undef, %0, half %1, iXLen %2) @@ -50,6 +52,7 @@ entry: declare @llvm.riscv.vfrsub.nxv2f16.f16( , + , half, iXLen); @@ -61,6 +64,7 @@ define @intrinsic_vfrsub_vf_nxv2f16_nxv2f16_f16( @llvm.riscv.vfrsub.nxv2f16.f16( + undef, %0, half %1, iXLen %2) @@ -95,6 +99,7 @@ entry: declare @llvm.riscv.vfrsub.nxv4f16.f16( , + , half, iXLen); @@ -106,6 +111,7 @@ define @intrinsic_vfrsub_vf_nxv4f16_nxv4f16_f16( @llvm.riscv.vfrsub.nxv4f16.f16( + undef, %0, half %1, iXLen %2) @@ -140,6 +146,7 @@ entry: declare @llvm.riscv.vfrsub.nxv8f16.f16( , + , half, iXLen); @@ -151,6 +158,7 @@ define @intrinsic_vfrsub_vf_nxv8f16_nxv8f16_f16( @llvm.riscv.vfrsub.nxv8f16.f16( + undef, %0, half %1, iXLen %2) @@ -185,6 +193,7 @@ entry: declare @llvm.riscv.vfrsub.nxv16f16.f16( , + , half, iXLen); @@ -196,6 +205,7 @@ define @intrinsic_vfrsub_vf_nxv16f16_nxv16f16_f16( @llvm.riscv.vfrsub.nxv16f16.f16( + undef, %0, half %1, iXLen %2) @@ -230,6 +240,7 @@ entry: declare @llvm.riscv.vfrsub.nxv32f16.f16( , + , half, iXLen); @@ -241,6 +252,7 @@ define @intrinsic_vfrsub_vf_nxv32f16_nxv32f16_f16( @llvm.riscv.vfrsub.nxv32f16.f16( + undef, %0, half %1, iXLen %2) @@ -275,6 +287,7 @@ entry: declare @llvm.riscv.vfrsub.nxv1f32.f32( , + , float, iXLen); @@ -286,6 +299,7 @@ define @intrinsic_vfrsub_vf_nxv1f32_nxv1f32_f32( @llvm.riscv.vfrsub.nxv1f32.f32( + undef, %0, float %1, iXLen %2) @@ -320,6 +334,7 @@ entry: declare @llvm.riscv.vfrsub.nxv2f32.f32( , + , float, iXLen); @@ -331,6 +346,7 @@ define @intrinsic_vfrsub_vf_nxv2f32_nxv2f32_f32( @llvm.riscv.vfrsub.nxv2f32.f32( + undef, %0, float %1, iXLen %2) @@ -365,6 +381,7 @@ entry: declare @llvm.riscv.vfrsub.nxv4f32.f32( , + , float, iXLen); @@ -376,6 +393,7 @@ define @intrinsic_vfrsub_vf_nxv4f32_nxv4f32_f32( @llvm.riscv.vfrsub.nxv4f32.f32( + undef, %0, float %1, iXLen %2) @@ -410,6 +428,7 @@ entry: declare @llvm.riscv.vfrsub.nxv8f32.f32( , + , float, iXLen); @@ -421,6 +440,7 @@ define @intrinsic_vfrsub_vf_nxv8f32_nxv8f32_f32( @llvm.riscv.vfrsub.nxv8f32.f32( + undef, %0, float %1, iXLen %2) @@ -455,6 +475,7 @@ entry: declare @llvm.riscv.vfrsub.nxv16f32.f32( , + , float, iXLen); @@ -466,6 +487,7 @@ define @intrinsic_vfrsub_vf_nxv16f32_nxv16f32_f32( @llvm.riscv.vfrsub.nxv16f32.f32( + undef, %0, float %1, iXLen %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vfrsub.nxv1f64.f64( , + , double, iXLen); @@ -511,6 +534,7 @@ define @intrinsic_vfrsub_vf_nxv1f64_nxv1f64_f64( @llvm.riscv.vfrsub.nxv1f64.f64( + undef, %0, double %1, iXLen %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vfrsub.nxv2f64.f64( , + , double, iXLen); @@ -556,6 +581,7 @@ define 
@intrinsic_vfrsub_vf_nxv2f64_nxv2f64_f64( @llvm.riscv.vfrsub.nxv2f64.f64( + undef, %0, double %1, iXLen %2) @@ -590,6 +616,7 @@ entry: declare @llvm.riscv.vfrsub.nxv4f64.f64( , + , double, iXLen); @@ -601,6 +628,7 @@ define @intrinsic_vfrsub_vf_nxv4f64_nxv4f64_f64( @llvm.riscv.vfrsub.nxv4f64.f64( + undef, %0, double %1, iXLen %2) @@ -635,6 +663,7 @@ entry: declare @llvm.riscv.vfrsub.nxv8f64.f64( , + , double, iXLen); @@ -646,6 +675,7 @@ define @intrinsic_vfrsub_vf_nxv8f64_nxv8f64_f64( @llvm.riscv.vfrsub.nxv8f64.f64( + undef, %0, double %1, iXLen %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnj.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnj.ll index 65a5592..568d0e8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnj.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnj.ll @@ -6,6 +6,7 @@ declare @llvm.riscv.vfsgnj.nxv1f16.nxv1f16( , , + , iXLen); define @intrinsic_vfsgnj_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { @@ -16,6 +17,7 @@ define @intrinsic_vfsgnj_vv_nxv1f16_nxv1f16_nxv1f16( @llvm.riscv.vfsgnj.nxv1f16.nxv1f16( + undef, %0, %1, iXLen %2) @@ -51,6 +53,7 @@ entry: declare @llvm.riscv.vfsgnj.nxv2f16.nxv2f16( , , + , iXLen); define @intrinsic_vfsgnj_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { @@ -61,6 +64,7 @@ define @intrinsic_vfsgnj_vv_nxv2f16_nxv2f16_nxv2f16( @llvm.riscv.vfsgnj.nxv2f16.nxv2f16( + undef, %0, %1, iXLen %2) @@ -96,6 +100,7 @@ entry: declare @llvm.riscv.vfsgnj.nxv4f16.nxv4f16( , , + , iXLen); define @intrinsic_vfsgnj_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { @@ -106,6 +111,7 @@ define @intrinsic_vfsgnj_vv_nxv4f16_nxv4f16_nxv4f16( @llvm.riscv.vfsgnj.nxv4f16.nxv4f16( + undef, %0, %1, iXLen %2) @@ -141,6 +147,7 @@ entry: declare @llvm.riscv.vfsgnj.nxv8f16.nxv8f16( , , + , iXLen); define @intrinsic_vfsgnj_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { @@ -151,6 +158,7 @@ define @intrinsic_vfsgnj_vv_nxv8f16_nxv8f16_nxv8f16( @llvm.riscv.vfsgnj.nxv8f16.nxv8f16( + undef, %0, %1, iXLen %2) @@ -186,6 +194,7 @@ entry: declare @llvm.riscv.vfsgnj.nxv16f16.nxv16f16( , , + , iXLen); define @intrinsic_vfsgnj_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { @@ -196,6 +205,7 @@ define @intrinsic_vfsgnj_vv_nxv16f16_nxv16f16_nxv16f16( @llvm.riscv.vfsgnj.nxv16f16.nxv16f16( + undef, %0, %1, iXLen %2) @@ -231,6 +241,7 @@ entry: declare @llvm.riscv.vfsgnj.nxv32f16.nxv32f16( , , + , iXLen); define @intrinsic_vfsgnj_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { @@ -241,6 +252,7 @@ define @intrinsic_vfsgnj_vv_nxv32f16_nxv32f16_nxv32f16( @llvm.riscv.vfsgnj.nxv32f16.nxv32f16( + undef, %0, %1, iXLen %2) @@ -277,6 +289,7 @@ entry: declare @llvm.riscv.vfsgnj.nxv1f32.nxv1f32( , , + , iXLen); define @intrinsic_vfsgnj_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { @@ -287,6 +300,7 @@ define @intrinsic_vfsgnj_vv_nxv1f32_nxv1f32_nxv1f32( @llvm.riscv.vfsgnj.nxv1f32.nxv1f32( + undef, %0, %1, iXLen %2) @@ -322,6 +336,7 @@ entry: declare @llvm.riscv.vfsgnj.nxv2f32.nxv2f32( , , + , iXLen); define @intrinsic_vfsgnj_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { @@ -332,6 +347,7 @@ define @intrinsic_vfsgnj_vv_nxv2f32_nxv2f32_nxv2f32( @llvm.riscv.vfsgnj.nxv2f32.nxv2f32( + undef, %0, %1, iXLen %2) @@ -367,6 +383,7 @@ entry: declare @llvm.riscv.vfsgnj.nxv4f32.nxv4f32( , , + , iXLen); define @intrinsic_vfsgnj_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { @@ -377,6 +394,7 @@ define @intrinsic_vfsgnj_vv_nxv4f32_nxv4f32_nxv4f32( @llvm.riscv.vfsgnj.nxv4f32.nxv4f32( + undef, %0, %1, iXLen %2) @@ -412,6 +430,7 @@ entry: declare 
@llvm.riscv.vfsgnj.nxv8f32.nxv8f32( , , + , iXLen); define @intrinsic_vfsgnj_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { @@ -422,6 +441,7 @@ define @intrinsic_vfsgnj_vv_nxv8f32_nxv8f32_nxv8f32( @llvm.riscv.vfsgnj.nxv8f32.nxv8f32( + undef, %0, %1, iXLen %2) @@ -457,6 +477,7 @@ entry: declare @llvm.riscv.vfsgnj.nxv16f32.nxv16f32( , , + , iXLen); define @intrinsic_vfsgnj_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { @@ -467,6 +488,7 @@ define @intrinsic_vfsgnj_vv_nxv16f32_nxv16f32_nxv16f32( @llvm.riscv.vfsgnj.nxv16f32.nxv16f32( + undef, %0, %1, iXLen %2) @@ -503,6 +525,7 @@ entry: declare @llvm.riscv.vfsgnj.nxv1f64.nxv1f64( , , + , iXLen); define @intrinsic_vfsgnj_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { @@ -513,6 +536,7 @@ define @intrinsic_vfsgnj_vv_nxv1f64_nxv1f64_nxv1f64( @llvm.riscv.vfsgnj.nxv1f64.nxv1f64( + undef, %0, %1, iXLen %2) @@ -548,6 +572,7 @@ entry: declare @llvm.riscv.vfsgnj.nxv2f64.nxv2f64( , , + , iXLen); define @intrinsic_vfsgnj_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { @@ -558,6 +583,7 @@ define @intrinsic_vfsgnj_vv_nxv2f64_nxv2f64_nxv2f64( @llvm.riscv.vfsgnj.nxv2f64.nxv2f64( + undef, %0, %1, iXLen %2) @@ -593,6 +619,7 @@ entry: declare @llvm.riscv.vfsgnj.nxv4f64.nxv4f64( , , + , iXLen); define @intrinsic_vfsgnj_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { @@ -603,6 +630,7 @@ define @intrinsic_vfsgnj_vv_nxv4f64_nxv4f64_nxv4f64( @llvm.riscv.vfsgnj.nxv4f64.nxv4f64( + undef, %0, %1, iXLen %2) @@ -638,6 +666,7 @@ entry: declare @llvm.riscv.vfsgnj.nxv8f64.nxv8f64( , , + , iXLen); define @intrinsic_vfsgnj_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { @@ -648,6 +677,7 @@ define @intrinsic_vfsgnj_vv_nxv8f64_nxv8f64_nxv8f64( @llvm.riscv.vfsgnj.nxv8f64.nxv8f64( + undef, %0, %1, iXLen %2) @@ -683,6 +713,7 @@ entry: declare @llvm.riscv.vfsgnj.nxv1f16.f16( , + , half, iXLen); @@ -694,6 +725,7 @@ define @intrinsic_vfsgnj_vf_nxv1f16_nxv1f16_f16( @llvm.riscv.vfsgnj.nxv1f16.f16( + undef, %0, half %1, iXLen %2) @@ -728,6 +760,7 @@ entry: declare @llvm.riscv.vfsgnj.nxv2f16.f16( , + , half, iXLen); @@ -739,6 +772,7 @@ define @intrinsic_vfsgnj_vf_nxv2f16_nxv2f16_f16( @llvm.riscv.vfsgnj.nxv2f16.f16( + undef, %0, half %1, iXLen %2) @@ -773,6 +807,7 @@ entry: declare @llvm.riscv.vfsgnj.nxv4f16.f16( , + , half, iXLen); @@ -784,6 +819,7 @@ define @intrinsic_vfsgnj_vf_nxv4f16_nxv4f16_f16( @llvm.riscv.vfsgnj.nxv4f16.f16( + undef, %0, half %1, iXLen %2) @@ -818,6 +854,7 @@ entry: declare @llvm.riscv.vfsgnj.nxv8f16.f16( , + , half, iXLen); @@ -829,6 +866,7 @@ define @intrinsic_vfsgnj_vf_nxv8f16_nxv8f16_f16( @llvm.riscv.vfsgnj.nxv8f16.f16( + undef, %0, half %1, iXLen %2) @@ -863,6 +901,7 @@ entry: declare @llvm.riscv.vfsgnj.nxv16f16.f16( , + , half, iXLen); @@ -874,6 +913,7 @@ define @intrinsic_vfsgnj_vf_nxv16f16_nxv16f16_f16( @llvm.riscv.vfsgnj.nxv16f16.f16( + undef, %0, half %1, iXLen %2) @@ -908,6 +948,7 @@ entry: declare @llvm.riscv.vfsgnj.nxv32f16.f16( , + , half, iXLen); @@ -919,6 +960,7 @@ define @intrinsic_vfsgnj_vf_nxv32f16_nxv32f16_f16( @llvm.riscv.vfsgnj.nxv32f16.f16( + undef, %0, half %1, iXLen %2) @@ -953,6 +995,7 @@ entry: declare @llvm.riscv.vfsgnj.nxv1f32.f32( , + , float, iXLen); @@ -964,6 +1007,7 @@ define @intrinsic_vfsgnj_vf_nxv1f32_nxv1f32_f32( @llvm.riscv.vfsgnj.nxv1f32.f32( + undef, %0, float %1, iXLen %2) @@ -998,6 +1042,7 @@ entry: declare @llvm.riscv.vfsgnj.nxv2f32.f32( , + , float, iXLen); @@ -1009,6 +1054,7 @@ define @intrinsic_vfsgnj_vf_nxv2f32_nxv2f32_f32( @llvm.riscv.vfsgnj.nxv2f32.f32( + 
undef, %0, float %1, iXLen %2) @@ -1043,6 +1089,7 @@ entry: declare @llvm.riscv.vfsgnj.nxv4f32.f32( , + , float, iXLen); @@ -1054,6 +1101,7 @@ define @intrinsic_vfsgnj_vf_nxv4f32_nxv4f32_f32( @llvm.riscv.vfsgnj.nxv4f32.f32( + undef, %0, float %1, iXLen %2) @@ -1088,6 +1136,7 @@ entry: declare @llvm.riscv.vfsgnj.nxv8f32.f32( , + , float, iXLen); @@ -1099,6 +1148,7 @@ define @intrinsic_vfsgnj_vf_nxv8f32_nxv8f32_f32( @llvm.riscv.vfsgnj.nxv8f32.f32( + undef, %0, float %1, iXLen %2) @@ -1133,6 +1183,7 @@ entry: declare @llvm.riscv.vfsgnj.nxv16f32.f32( , + , float, iXLen); @@ -1144,6 +1195,7 @@ define @intrinsic_vfsgnj_vf_nxv16f32_nxv16f32_f32( @llvm.riscv.vfsgnj.nxv16f32.f32( + undef, %0, float %1, iXLen %2) @@ -1178,6 +1230,7 @@ entry: declare @llvm.riscv.vfsgnj.nxv1f64.f64( , + , double, iXLen); @@ -1189,6 +1242,7 @@ define @intrinsic_vfsgnj_vf_nxv1f64_nxv1f64_f64( @llvm.riscv.vfsgnj.nxv1f64.f64( + undef, %0, double %1, iXLen %2) @@ -1223,6 +1277,7 @@ entry: declare @llvm.riscv.vfsgnj.nxv2f64.f64( , + , double, iXLen); @@ -1234,6 +1289,7 @@ define @intrinsic_vfsgnj_vf_nxv2f64_nxv2f64_f64( @llvm.riscv.vfsgnj.nxv2f64.f64( + undef, %0, double %1, iXLen %2) @@ -1268,6 +1324,7 @@ entry: declare @llvm.riscv.vfsgnj.nxv4f64.f64( , + , double, iXLen); @@ -1279,6 +1336,7 @@ define @intrinsic_vfsgnj_vf_nxv4f64_nxv4f64_f64( @llvm.riscv.vfsgnj.nxv4f64.f64( + undef, %0, double %1, iXLen %2) @@ -1313,6 +1371,7 @@ entry: declare @llvm.riscv.vfsgnj.nxv8f64.f64( , + , double, iXLen); @@ -1324,6 +1383,7 @@ define @intrinsic_vfsgnj_vf_nxv8f64_nxv8f64_f64( @llvm.riscv.vfsgnj.nxv8f64.f64( + undef, %0, double %1, iXLen %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn.ll index f16c8a6..4a1171b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn.ll @@ -6,6 +6,7 @@ declare @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16( , , + , iXLen); define @intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { @@ -16,6 +17,7 @@ define @intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16_nxv1f16( @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16( + undef, %0, %1, iXLen %2) @@ -51,6 +53,7 @@ entry: declare @llvm.riscv.vfsgnjn.nxv2f16.nxv2f16( , , + , iXLen); define @intrinsic_vfsgnjn_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { @@ -61,6 +64,7 @@ define @intrinsic_vfsgnjn_vv_nxv2f16_nxv2f16_nxv2f16( @llvm.riscv.vfsgnjn.nxv2f16.nxv2f16( + undef, %0, %1, iXLen %2) @@ -96,6 +100,7 @@ entry: declare @llvm.riscv.vfsgnjn.nxv4f16.nxv4f16( , , + , iXLen); define @intrinsic_vfsgnjn_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { @@ -106,6 +111,7 @@ define @intrinsic_vfsgnjn_vv_nxv4f16_nxv4f16_nxv4f16( @llvm.riscv.vfsgnjn.nxv4f16.nxv4f16( + undef, %0, %1, iXLen %2) @@ -141,6 +147,7 @@ entry: declare @llvm.riscv.vfsgnjn.nxv8f16.nxv8f16( , , + , iXLen); define @intrinsic_vfsgnjn_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { @@ -151,6 +158,7 @@ define @intrinsic_vfsgnjn_vv_nxv8f16_nxv8f16_nxv8f16( @llvm.riscv.vfsgnjn.nxv8f16.nxv8f16( + undef, %0, %1, iXLen %2) @@ -186,6 +194,7 @@ entry: declare @llvm.riscv.vfsgnjn.nxv16f16.nxv16f16( , , + , iXLen); define @intrinsic_vfsgnjn_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { @@ -196,6 +205,7 @@ define @intrinsic_vfsgnjn_vv_nxv16f16_nxv16f16_nxv16f16( @llvm.riscv.vfsgnjn.nxv16f16.nxv16f16( + undef, %0, %1, iXLen %2) @@ -231,6 +241,7 @@ entry: declare @llvm.riscv.vfsgnjn.nxv32f16.nxv32f16( , , + , iXLen); define @intrinsic_vfsgnjn_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { @@ 
-241,6 +252,7 @@ define @intrinsic_vfsgnjn_vv_nxv32f16_nxv32f16_nxv32f16( @llvm.riscv.vfsgnjn.nxv32f16.nxv32f16( + undef, %0, %1, iXLen %2) @@ -277,6 +289,7 @@ entry: declare @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32( , , + , iXLen); define @intrinsic_vfsgnjn_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { @@ -287,6 +300,7 @@ define @intrinsic_vfsgnjn_vv_nxv1f32_nxv1f32_nxv1f32( @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32( + undef, %0, %1, iXLen %2) @@ -322,6 +336,7 @@ entry: declare @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32( , , + , iXLen); define @intrinsic_vfsgnjn_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { @@ -332,6 +347,7 @@ define @intrinsic_vfsgnjn_vv_nxv2f32_nxv2f32_nxv2f32( @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32( + undef, %0, %1, iXLen %2) @@ -367,6 +383,7 @@ entry: declare @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32( , , + , iXLen); define @intrinsic_vfsgnjn_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { @@ -377,6 +394,7 @@ define @intrinsic_vfsgnjn_vv_nxv4f32_nxv4f32_nxv4f32( @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32( + undef, %0, %1, iXLen %2) @@ -412,6 +430,7 @@ entry: declare @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32( , , + , iXLen); define @intrinsic_vfsgnjn_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { @@ -422,6 +441,7 @@ define @intrinsic_vfsgnjn_vv_nxv8f32_nxv8f32_nxv8f32( @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32( + undef, %0, %1, iXLen %2) @@ -457,6 +477,7 @@ entry: declare @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32( , , + , iXLen); define @intrinsic_vfsgnjn_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { @@ -467,6 +488,7 @@ define @intrinsic_vfsgnjn_vv_nxv16f32_nxv16f32_nxv16f32( @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32( + undef, %0, %1, iXLen %2) @@ -503,6 +525,7 @@ entry: declare @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64( , , + , iXLen); define @intrinsic_vfsgnjn_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { @@ -513,6 +536,7 @@ define @intrinsic_vfsgnjn_vv_nxv1f64_nxv1f64_nxv1f64( @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64( + undef, %0, %1, iXLen %2) @@ -548,6 +572,7 @@ entry: declare @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64( , , + , iXLen); define @intrinsic_vfsgnjn_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { @@ -558,6 +583,7 @@ define @intrinsic_vfsgnjn_vv_nxv2f64_nxv2f64_nxv2f64( @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64( + undef, %0, %1, iXLen %2) @@ -593,6 +619,7 @@ entry: declare @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64( , , + , iXLen); define @intrinsic_vfsgnjn_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { @@ -603,6 +630,7 @@ define @intrinsic_vfsgnjn_vv_nxv4f64_nxv4f64_nxv4f64( @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64( + undef, %0, %1, iXLen %2) @@ -638,6 +666,7 @@ entry: declare @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64( , , + , iXLen); define @intrinsic_vfsgnjn_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { @@ -648,6 +677,7 @@ define @intrinsic_vfsgnjn_vv_nxv8f64_nxv8f64_nxv8f64( @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64( + undef, %0, %1, iXLen %2) @@ -683,6 +713,7 @@ entry: declare @llvm.riscv.vfsgnjn.nxv1f16.f16( , + , half, iXLen); @@ -694,6 +725,7 @@ define @intrinsic_vfsgnjn_vf_nxv1f16_nxv1f16_f16( @llvm.riscv.vfsgnjn.nxv1f16.f16( + undef, %0, half %1, iXLen %2) @@ -728,6 +760,7 @@ entry: declare @llvm.riscv.vfsgnjn.nxv2f16.f16( , + , half, iXLen); @@ -739,6 +772,7 @@ define @intrinsic_vfsgnjn_vf_nxv2f16_nxv2f16_f16( @llvm.riscv.vfsgnjn.nxv2f16.f16( + undef, %0, half %1, iXLen %2) @@ -773,6 +807,7 @@ entry: declare @llvm.riscv.vfsgnjn.nxv4f16.f16( , + , half, iXLen); @@ -784,6 +819,7 @@ define @intrinsic_vfsgnjn_vf_nxv4f16_nxv4f16_f16( 
@llvm.riscv.vfsgnjn.nxv4f16.f16( + undef, %0, half %1, iXLen %2) @@ -818,6 +854,7 @@ entry: declare @llvm.riscv.vfsgnjn.nxv8f16.f16( , + , half, iXLen); @@ -829,6 +866,7 @@ define @intrinsic_vfsgnjn_vf_nxv8f16_nxv8f16_f16( @llvm.riscv.vfsgnjn.nxv8f16.f16( + undef, %0, half %1, iXLen %2) @@ -863,6 +901,7 @@ entry: declare @llvm.riscv.vfsgnjn.nxv16f16.f16( , + , half, iXLen); @@ -874,6 +913,7 @@ define @intrinsic_vfsgnjn_vf_nxv16f16_nxv16f16_f16( @llvm.riscv.vfsgnjn.nxv16f16.f16( + undef, %0, half %1, iXLen %2) @@ -908,6 +948,7 @@ entry: declare @llvm.riscv.vfsgnjn.nxv32f16.f16( , + , half, iXLen); @@ -919,6 +960,7 @@ define @intrinsic_vfsgnjn_vf_nxv32f16_nxv32f16_f16( @llvm.riscv.vfsgnjn.nxv32f16.f16( + undef, %0, half %1, iXLen %2) @@ -953,6 +995,7 @@ entry: declare @llvm.riscv.vfsgnjn.nxv1f32.f32( , + , float, iXLen); @@ -964,6 +1007,7 @@ define @intrinsic_vfsgnjn_vf_nxv1f32_nxv1f32_f32( @llvm.riscv.vfsgnjn.nxv1f32.f32( + undef, %0, float %1, iXLen %2) @@ -998,6 +1042,7 @@ entry: declare @llvm.riscv.vfsgnjn.nxv2f32.f32( , + , float, iXLen); @@ -1009,6 +1054,7 @@ define @intrinsic_vfsgnjn_vf_nxv2f32_nxv2f32_f32( @llvm.riscv.vfsgnjn.nxv2f32.f32( + undef, %0, float %1, iXLen %2) @@ -1043,6 +1089,7 @@ entry: declare @llvm.riscv.vfsgnjn.nxv4f32.f32( , + , float, iXLen); @@ -1054,6 +1101,7 @@ define @intrinsic_vfsgnjn_vf_nxv4f32_nxv4f32_f32( @llvm.riscv.vfsgnjn.nxv4f32.f32( + undef, %0, float %1, iXLen %2) @@ -1088,6 +1136,7 @@ entry: declare @llvm.riscv.vfsgnjn.nxv8f32.f32( , + , float, iXLen); @@ -1099,6 +1148,7 @@ define @intrinsic_vfsgnjn_vf_nxv8f32_nxv8f32_f32( @llvm.riscv.vfsgnjn.nxv8f32.f32( + undef, %0, float %1, iXLen %2) @@ -1133,6 +1183,7 @@ entry: declare @llvm.riscv.vfsgnjn.nxv16f32.f32( , + , float, iXLen); @@ -1144,6 +1195,7 @@ define @intrinsic_vfsgnjn_vf_nxv16f32_nxv16f32_f32( @llvm.riscv.vfsgnjn.nxv16f32.f32( + undef, %0, float %1, iXLen %2) @@ -1178,6 +1230,7 @@ entry: declare @llvm.riscv.vfsgnjn.nxv1f64.f64( , + , double, iXLen); @@ -1189,6 +1242,7 @@ define @intrinsic_vfsgnjn_vf_nxv1f64_nxv1f64_f64( @llvm.riscv.vfsgnjn.nxv1f64.f64( + undef, %0, double %1, iXLen %2) @@ -1223,6 +1277,7 @@ entry: declare @llvm.riscv.vfsgnjn.nxv2f64.f64( , + , double, iXLen); @@ -1234,6 +1289,7 @@ define @intrinsic_vfsgnjn_vf_nxv2f64_nxv2f64_f64( @llvm.riscv.vfsgnjn.nxv2f64.f64( + undef, %0, double %1, iXLen %2) @@ -1268,6 +1324,7 @@ entry: declare @llvm.riscv.vfsgnjn.nxv4f64.f64( , + , double, iXLen); @@ -1279,6 +1336,7 @@ define @intrinsic_vfsgnjn_vf_nxv4f64_nxv4f64_f64( @llvm.riscv.vfsgnjn.nxv4f64.f64( + undef, %0, double %1, iXLen %2) @@ -1313,6 +1371,7 @@ entry: declare @llvm.riscv.vfsgnjn.nxv8f64.f64( , + , double, iXLen); @@ -1324,6 +1383,7 @@ define @intrinsic_vfsgnjn_vf_nxv8f64_nxv8f64_f64( @llvm.riscv.vfsgnjn.nxv8f64.f64( + undef, %0, double %1, iXLen %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx.ll index edfd578..cd1f2d8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx.ll @@ -6,6 +6,7 @@ declare @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16( , , + , iXLen); define @intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { @@ -16,6 +17,7 @@ define @intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16_nxv1f16( @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16( + undef, %0, %1, iXLen %2) @@ -51,6 +53,7 @@ entry: declare @llvm.riscv.vfsgnjx.nxv2f16.nxv2f16( , , + , iXLen); define @intrinsic_vfsgnjx_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { @@ -61,6 +64,7 @@ define 
@intrinsic_vfsgnjx_vv_nxv2f16_nxv2f16_nxv2f16( @llvm.riscv.vfsgnjx.nxv2f16.nxv2f16( + undef, %0, %1, iXLen %2) @@ -96,6 +100,7 @@ entry: declare @llvm.riscv.vfsgnjx.nxv4f16.nxv4f16( , , + , iXLen); define @intrinsic_vfsgnjx_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { @@ -106,6 +111,7 @@ define @intrinsic_vfsgnjx_vv_nxv4f16_nxv4f16_nxv4f16( @llvm.riscv.vfsgnjx.nxv4f16.nxv4f16( + undef, %0, %1, iXLen %2) @@ -141,6 +147,7 @@ entry: declare @llvm.riscv.vfsgnjx.nxv8f16.nxv8f16( , , + , iXLen); define @intrinsic_vfsgnjx_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { @@ -151,6 +158,7 @@ define @intrinsic_vfsgnjx_vv_nxv8f16_nxv8f16_nxv8f16( @llvm.riscv.vfsgnjx.nxv8f16.nxv8f16( + undef, %0, %1, iXLen %2) @@ -186,6 +194,7 @@ entry: declare @llvm.riscv.vfsgnjx.nxv16f16.nxv16f16( , , + , iXLen); define @intrinsic_vfsgnjx_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { @@ -196,6 +205,7 @@ define @intrinsic_vfsgnjx_vv_nxv16f16_nxv16f16_nxv16f16( @llvm.riscv.vfsgnjx.nxv16f16.nxv16f16( + undef, %0, %1, iXLen %2) @@ -231,6 +241,7 @@ entry: declare @llvm.riscv.vfsgnjx.nxv32f16.nxv32f16( , , + , iXLen); define @intrinsic_vfsgnjx_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { @@ -241,6 +252,7 @@ define @intrinsic_vfsgnjx_vv_nxv32f16_nxv32f16_nxv32f16( @llvm.riscv.vfsgnjx.nxv32f16.nxv32f16( + undef, %0, %1, iXLen %2) @@ -277,6 +289,7 @@ entry: declare @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32( , , + , iXLen); define @intrinsic_vfsgnjx_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { @@ -287,6 +300,7 @@ define @intrinsic_vfsgnjx_vv_nxv1f32_nxv1f32_nxv1f32( @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32( + undef, %0, %1, iXLen %2) @@ -322,6 +336,7 @@ entry: declare @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32( , , + , iXLen); define @intrinsic_vfsgnjx_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { @@ -332,6 +347,7 @@ define @intrinsic_vfsgnjx_vv_nxv2f32_nxv2f32_nxv2f32( @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32( + undef, %0, %1, iXLen %2) @@ -367,6 +383,7 @@ entry: declare @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32( , , + , iXLen); define @intrinsic_vfsgnjx_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { @@ -377,6 +394,7 @@ define @intrinsic_vfsgnjx_vv_nxv4f32_nxv4f32_nxv4f32( @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32( + undef, %0, %1, iXLen %2) @@ -412,6 +430,7 @@ entry: declare @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32( , , + , iXLen); define @intrinsic_vfsgnjx_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { @@ -422,6 +441,7 @@ define @intrinsic_vfsgnjx_vv_nxv8f32_nxv8f32_nxv8f32( @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32( + undef, %0, %1, iXLen %2) @@ -457,6 +477,7 @@ entry: declare @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32( , , + , iXLen); define @intrinsic_vfsgnjx_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { @@ -467,6 +488,7 @@ define @intrinsic_vfsgnjx_vv_nxv16f32_nxv16f32_nxv16f32( @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32( + undef, %0, %1, iXLen %2) @@ -503,6 +525,7 @@ entry: declare @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64( , , + , iXLen); define @intrinsic_vfsgnjx_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { @@ -513,6 +536,7 @@ define @intrinsic_vfsgnjx_vv_nxv1f64_nxv1f64_nxv1f64( @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64( + undef, %0, %1, iXLen %2) @@ -548,6 +572,7 @@ entry: declare @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64( , , + , iXLen); define @intrinsic_vfsgnjx_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { @@ -558,6 +583,7 @@ define @intrinsic_vfsgnjx_vv_nxv2f64_nxv2f64_nxv2f64( @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64( + undef, %0, %1, iXLen %2) @@ -593,6 
+619,7 @@ entry: declare @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64( , , + , iXLen); define @intrinsic_vfsgnjx_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { @@ -603,6 +630,7 @@ define @intrinsic_vfsgnjx_vv_nxv4f64_nxv4f64_nxv4f64( @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64( + undef, %0, %1, iXLen %2) @@ -638,6 +666,7 @@ entry: declare @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64( , , + , iXLen); define @intrinsic_vfsgnjx_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { @@ -648,6 +677,7 @@ define @intrinsic_vfsgnjx_vv_nxv8f64_nxv8f64_nxv8f64( @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64( + undef, %0, %1, iXLen %2) @@ -683,6 +713,7 @@ entry: declare @llvm.riscv.vfsgnjx.nxv1f16.f16( , + , half, iXLen); @@ -694,6 +725,7 @@ define @intrinsic_vfsgnjx_vf_nxv1f16_nxv1f16_f16( @llvm.riscv.vfsgnjx.nxv1f16.f16( + undef, %0, half %1, iXLen %2) @@ -728,6 +760,7 @@ entry: declare @llvm.riscv.vfsgnjx.nxv2f16.f16( , + , half, iXLen); @@ -739,6 +772,7 @@ define @intrinsic_vfsgnjx_vf_nxv2f16_nxv2f16_f16( @llvm.riscv.vfsgnjx.nxv2f16.f16( + undef, %0, half %1, iXLen %2) @@ -773,6 +807,7 @@ entry: declare @llvm.riscv.vfsgnjx.nxv4f16.f16( , + , half, iXLen); @@ -784,6 +819,7 @@ define @intrinsic_vfsgnjx_vf_nxv4f16_nxv4f16_f16( @llvm.riscv.vfsgnjx.nxv4f16.f16( + undef, %0, half %1, iXLen %2) @@ -818,6 +854,7 @@ entry: declare @llvm.riscv.vfsgnjx.nxv8f16.f16( , + , half, iXLen); @@ -829,6 +866,7 @@ define @intrinsic_vfsgnjx_vf_nxv8f16_nxv8f16_f16( @llvm.riscv.vfsgnjx.nxv8f16.f16( + undef, %0, half %1, iXLen %2) @@ -863,6 +901,7 @@ entry: declare @llvm.riscv.vfsgnjx.nxv16f16.f16( , + , half, iXLen); @@ -874,6 +913,7 @@ define @intrinsic_vfsgnjx_vf_nxv16f16_nxv16f16_f16( @llvm.riscv.vfsgnjx.nxv16f16.f16( + undef, %0, half %1, iXLen %2) @@ -908,6 +948,7 @@ entry: declare @llvm.riscv.vfsgnjx.nxv32f16.f16( , + , half, iXLen); @@ -919,6 +960,7 @@ define @intrinsic_vfsgnjx_vf_nxv32f16_nxv32f16_f16( @llvm.riscv.vfsgnjx.nxv32f16.f16( + undef, %0, half %1, iXLen %2) @@ -953,6 +995,7 @@ entry: declare @llvm.riscv.vfsgnjx.nxv1f32.f32( , + , float, iXLen); @@ -964,6 +1007,7 @@ define @intrinsic_vfsgnjx_vf_nxv1f32_nxv1f32_f32( @llvm.riscv.vfsgnjx.nxv1f32.f32( + undef, %0, float %1, iXLen %2) @@ -998,6 +1042,7 @@ entry: declare @llvm.riscv.vfsgnjx.nxv2f32.f32( , + , float, iXLen); @@ -1009,6 +1054,7 @@ define @intrinsic_vfsgnjx_vf_nxv2f32_nxv2f32_f32( @llvm.riscv.vfsgnjx.nxv2f32.f32( + undef, %0, float %1, iXLen %2) @@ -1043,6 +1089,7 @@ entry: declare @llvm.riscv.vfsgnjx.nxv4f32.f32( , + , float, iXLen); @@ -1054,6 +1101,7 @@ define @intrinsic_vfsgnjx_vf_nxv4f32_nxv4f32_f32( @llvm.riscv.vfsgnjx.nxv4f32.f32( + undef, %0, float %1, iXLen %2) @@ -1088,6 +1136,7 @@ entry: declare @llvm.riscv.vfsgnjx.nxv8f32.f32( , + , float, iXLen); @@ -1099,6 +1148,7 @@ define @intrinsic_vfsgnjx_vf_nxv8f32_nxv8f32_f32( @llvm.riscv.vfsgnjx.nxv8f32.f32( + undef, %0, float %1, iXLen %2) @@ -1133,6 +1183,7 @@ entry: declare @llvm.riscv.vfsgnjx.nxv16f32.f32( , + , float, iXLen); @@ -1144,6 +1195,7 @@ define @intrinsic_vfsgnjx_vf_nxv16f32_nxv16f32_f32( @llvm.riscv.vfsgnjx.nxv16f32.f32( + undef, %0, float %1, iXLen %2) @@ -1178,6 +1230,7 @@ entry: declare @llvm.riscv.vfsgnjx.nxv1f64.f64( , + , double, iXLen); @@ -1189,6 +1242,7 @@ define @intrinsic_vfsgnjx_vf_nxv1f64_nxv1f64_f64( @llvm.riscv.vfsgnjx.nxv1f64.f64( + undef, %0, double %1, iXLen %2) @@ -1223,6 +1277,7 @@ entry: declare @llvm.riscv.vfsgnjx.nxv2f64.f64( , + , double, iXLen); @@ -1234,6 +1289,7 @@ define @intrinsic_vfsgnjx_vf_nxv2f64_nxv2f64_f64( @llvm.riscv.vfsgnjx.nxv2f64.f64( + undef, %0, double %1, 
iXLen %2) @@ -1268,6 +1324,7 @@ entry: declare @llvm.riscv.vfsgnjx.nxv4f64.f64( , + , double, iXLen); @@ -1279,6 +1336,7 @@ define @intrinsic_vfsgnjx_vf_nxv4f64_nxv4f64_f64( @llvm.riscv.vfsgnjx.nxv4f64.f64( + undef, %0, double %1, iXLen %2) @@ -1313,6 +1371,7 @@ entry: declare @llvm.riscv.vfsgnjx.nxv8f64.f64( , + , double, iXLen); @@ -1324,6 +1383,7 @@ define @intrinsic_vfsgnjx_vf_nxv8f64_nxv8f64_f64( @llvm.riscv.vfsgnjx.nxv8f64.f64( + undef, %0, double %1, iXLen %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1down.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1down.ll index 6cbba48..2bcf3cf 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfslide1down.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1down.ll @@ -5,6 +5,7 @@ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfslide1down.nxv1f16.f16( , + , half, iXLen); @@ -16,6 +17,7 @@ define @intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16( @llvm.riscv.vfslide1down.nxv1f16.f16( + undef, %0, half %1, iXLen %2) @@ -50,6 +52,7 @@ entry: declare @llvm.riscv.vfslide1down.nxv2f16.f16( , + , half, iXLen); @@ -61,6 +64,7 @@ define @intrinsic_vfslide1down_vf_nxv2f16_nxv2f16_f16( @llvm.riscv.vfslide1down.nxv2f16.f16( + undef, %0, half %1, iXLen %2) @@ -95,6 +99,7 @@ entry: declare @llvm.riscv.vfslide1down.nxv4f16.f16( , + , half, iXLen); @@ -106,6 +111,7 @@ define @intrinsic_vfslide1down_vf_nxv4f16_nxv4f16_f16( @llvm.riscv.vfslide1down.nxv4f16.f16( + undef, %0, half %1, iXLen %2) @@ -140,6 +146,7 @@ entry: declare @llvm.riscv.vfslide1down.nxv8f16.f16( , + , half, iXLen); @@ -151,6 +158,7 @@ define @intrinsic_vfslide1down_vf_nxv8f16_nxv8f16_f16( @llvm.riscv.vfslide1down.nxv8f16.f16( + undef, %0, half %1, iXLen %2) @@ -185,6 +193,7 @@ entry: declare @llvm.riscv.vfslide1down.nxv16f16.f16( , + , half, iXLen); @@ -196,6 +205,7 @@ define @intrinsic_vfslide1down_vf_nxv16f16_nxv16f16_f16( @llvm.riscv.vfslide1down.nxv16f16.f16( + undef, %0, half %1, iXLen %2) @@ -230,6 +240,7 @@ entry: declare @llvm.riscv.vfslide1down.nxv32f16.f16( , + , half, iXLen); @@ -241,6 +252,7 @@ define @intrinsic_vfslide1down_vf_nxv32f16_nxv32f16_f16( @llvm.riscv.vfslide1down.nxv32f16.f16( + undef, %0, half %1, iXLen %2) @@ -275,6 +287,7 @@ entry: declare @llvm.riscv.vfslide1down.nxv1f32.f32( , + , float, iXLen); @@ -286,6 +299,7 @@ define @intrinsic_vfslide1down_vf_nxv1f32_nxv1f32_f32( @llvm.riscv.vfslide1down.nxv1f32.f32( + undef, %0, float %1, iXLen %2) @@ -320,6 +334,7 @@ entry: declare @llvm.riscv.vfslide1down.nxv2f32.f32( , + , float, iXLen); @@ -331,6 +346,7 @@ define @intrinsic_vfslide1down_vf_nxv2f32_nxv2f32_f32( @llvm.riscv.vfslide1down.nxv2f32.f32( + undef, %0, float %1, iXLen %2) @@ -365,6 +381,7 @@ entry: declare @llvm.riscv.vfslide1down.nxv4f32.f32( , + , float, iXLen); @@ -376,6 +393,7 @@ define @intrinsic_vfslide1down_vf_nxv4f32_nxv4f32_f32( @llvm.riscv.vfslide1down.nxv4f32.f32( + undef, %0, float %1, iXLen %2) @@ -410,6 +428,7 @@ entry: declare @llvm.riscv.vfslide1down.nxv8f32.f32( , + , float, iXLen); @@ -421,6 +440,7 @@ define @intrinsic_vfslide1down_vf_nxv8f32_nxv8f32_f32( @llvm.riscv.vfslide1down.nxv8f32.f32( + undef, %0, float %1, iXLen %2) @@ -455,6 +475,7 @@ entry: declare @llvm.riscv.vfslide1down.nxv16f32.f32( , + , float, iXLen); @@ -466,6 +487,7 @@ define @intrinsic_vfslide1down_vf_nxv16f32_nxv16f32_f32( @llvm.riscv.vfslide1down.nxv16f32.f32( + undef, %0, float %1, iXLen %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vfslide1down.nxv1f64.f64( , + , double, iXLen); @@ -511,6 +534,7 @@ define 
@intrinsic_vfslide1down_vf_nxv1f64_nxv1f64_f64( @llvm.riscv.vfslide1down.nxv1f64.f64( + undef, %0, double %1, iXLen %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vfslide1down.nxv2f64.f64( , + , double, iXLen); @@ -556,6 +581,7 @@ define @intrinsic_vfslide1down_vf_nxv2f64_nxv2f64_f64( @llvm.riscv.vfslide1down.nxv2f64.f64( + undef, %0, double %1, iXLen %2) @@ -590,6 +616,7 @@ entry: declare @llvm.riscv.vfslide1down.nxv4f64.f64( , + , double, iXLen); @@ -601,6 +628,7 @@ define @intrinsic_vfslide1down_vf_nxv4f64_nxv4f64_f64( @llvm.riscv.vfslide1down.nxv4f64.f64( + undef, %0, double %1, iXLen %2) @@ -635,6 +663,7 @@ entry: declare @llvm.riscv.vfslide1down.nxv8f64.f64( , + , double, iXLen); @@ -646,6 +675,7 @@ define @intrinsic_vfslide1down_vf_nxv8f64_nxv8f64_f64( @llvm.riscv.vfslide1down.nxv8f64.f64( + undef, %0, double %1, iXLen %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1up.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1up.ll index 695cf7a..b70fdec 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfslide1up.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1up.ll @@ -5,6 +5,7 @@ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfslide1up.nxv1f16.f16( , + , half, iXLen); @@ -17,6 +18,7 @@ define @intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16( @llvm.riscv.vfslide1up.nxv1f16.f16( + undef, %0, half %1, iXLen %2) @@ -51,6 +53,7 @@ entry: declare @llvm.riscv.vfslide1up.nxv2f16.f16( , + , half, iXLen); @@ -63,6 +66,7 @@ define @intrinsic_vfslide1up_vf_nxv2f16_nxv2f16_f16( @llvm.riscv.vfslide1up.nxv2f16.f16( + undef, %0, half %1, iXLen %2) @@ -97,6 +101,7 @@ entry: declare @llvm.riscv.vfslide1up.nxv4f16.f16( , + , half, iXLen); @@ -109,6 +114,7 @@ define @intrinsic_vfslide1up_vf_nxv4f16_nxv4f16_f16( @llvm.riscv.vfslide1up.nxv4f16.f16( + undef, %0, half %1, iXLen %2) @@ -143,6 +149,7 @@ entry: declare @llvm.riscv.vfslide1up.nxv8f16.f16( , + , half, iXLen); @@ -155,6 +162,7 @@ define @intrinsic_vfslide1up_vf_nxv8f16_nxv8f16_f16( @llvm.riscv.vfslide1up.nxv8f16.f16( + undef, %0, half %1, iXLen %2) @@ -189,6 +197,7 @@ entry: declare @llvm.riscv.vfslide1up.nxv16f16.f16( , + , half, iXLen); @@ -201,6 +210,7 @@ define @intrinsic_vfslide1up_vf_nxv16f16_nxv16f16_f16( @llvm.riscv.vfslide1up.nxv16f16.f16( + undef, %0, half %1, iXLen %2) @@ -235,6 +245,7 @@ entry: declare @llvm.riscv.vfslide1up.nxv32f16.f16( , + , half, iXLen); @@ -247,6 +258,7 @@ define @intrinsic_vfslide1up_vf_nxv32f16_nxv32f16_f16( @llvm.riscv.vfslide1up.nxv32f16.f16( + undef, %0, half %1, iXLen %2) @@ -281,6 +293,7 @@ entry: declare @llvm.riscv.vfslide1up.nxv1f32.f32( , + , float, iXLen); @@ -293,6 +306,7 @@ define @intrinsic_vfslide1up_vf_nxv1f32_nxv1f32_f32( @llvm.riscv.vfslide1up.nxv1f32.f32( + undef, %0, float %1, iXLen %2) @@ -327,6 +341,7 @@ entry: declare @llvm.riscv.vfslide1up.nxv2f32.f32( , + , float, iXLen); @@ -339,6 +354,7 @@ define @intrinsic_vfslide1up_vf_nxv2f32_nxv2f32_f32( @llvm.riscv.vfslide1up.nxv2f32.f32( + undef, %0, float %1, iXLen %2) @@ -373,6 +389,7 @@ entry: declare @llvm.riscv.vfslide1up.nxv4f32.f32( , + , float, iXLen); @@ -385,6 +402,7 @@ define @intrinsic_vfslide1up_vf_nxv4f32_nxv4f32_f32( @llvm.riscv.vfslide1up.nxv4f32.f32( + undef, %0, float %1, iXLen %2) @@ -419,6 +437,7 @@ entry: declare @llvm.riscv.vfslide1up.nxv8f32.f32( , + , float, iXLen); @@ -431,6 +450,7 @@ define @intrinsic_vfslide1up_vf_nxv8f32_nxv8f32_f32( @llvm.riscv.vfslide1up.nxv8f32.f32( + undef, %0, float %1, iXLen %2) @@ -465,6 +485,7 @@ entry: declare @llvm.riscv.vfslide1up.nxv16f32.f32( , + , float, iXLen); @@ -477,6 
+498,7 @@ define @intrinsic_vfslide1up_vf_nxv16f32_nxv16f32_f32( @llvm.riscv.vfslide1up.nxv16f32.f32( + undef, %0, float %1, iXLen %2) @@ -511,6 +533,7 @@ entry: declare @llvm.riscv.vfslide1up.nxv1f64.f64( , + , double, iXLen); @@ -523,6 +546,7 @@ define @intrinsic_vfslide1up_vf_nxv1f64_nxv1f64_f64( @llvm.riscv.vfslide1up.nxv1f64.f64( + undef, %0, double %1, iXLen %2) @@ -557,6 +581,7 @@ entry: declare @llvm.riscv.vfslide1up.nxv2f64.f64( , + , double, iXLen); @@ -569,6 +594,7 @@ define @intrinsic_vfslide1up_vf_nxv2f64_nxv2f64_f64( @llvm.riscv.vfslide1up.nxv2f64.f64( + undef, %0, double %1, iXLen %2) @@ -603,6 +629,7 @@ entry: declare @llvm.riscv.vfslide1up.nxv4f64.f64( , + , double, iXLen); @@ -615,6 +642,7 @@ define @intrinsic_vfslide1up_vf_nxv4f64_nxv4f64_f64( @llvm.riscv.vfslide1up.nxv4f64.f64( + undef, %0, double %1, iXLen %2) @@ -649,6 +677,7 @@ entry: declare @llvm.riscv.vfslide1up.nxv8f64.f64( , + , double, iXLen); @@ -661,6 +690,7 @@ define @intrinsic_vfslide1up_vf_nxv8f64_nxv8f64_f64( @llvm.riscv.vfslide1up.nxv8f64.f64( + undef, %0, double %1, iXLen %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub.ll index 645fb34..e627b8f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfsub.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsub.ll @@ -6,6 +6,7 @@ declare @llvm.riscv.vfsub.nxv1f16.nxv1f16( , , + , iXLen); define @intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { @@ -16,6 +17,7 @@ define @intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16( @llvm.riscv.vfsub.nxv1f16.nxv1f16( + undef, %0, %1, iXLen %2) @@ -51,6 +53,7 @@ entry: declare @llvm.riscv.vfsub.nxv2f16.nxv2f16( , , + , iXLen); define @intrinsic_vfsub_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { @@ -61,6 +64,7 @@ define @intrinsic_vfsub_vv_nxv2f16_nxv2f16_nxv2f16( @llvm.riscv.vfsub.nxv2f16.nxv2f16( + undef, %0, %1, iXLen %2) @@ -96,6 +100,7 @@ entry: declare @llvm.riscv.vfsub.nxv4f16.nxv4f16( , , + , iXLen); define @intrinsic_vfsub_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { @@ -106,6 +111,7 @@ define @intrinsic_vfsub_vv_nxv4f16_nxv4f16_nxv4f16( @llvm.riscv.vfsub.nxv4f16.nxv4f16( + undef, %0, %1, iXLen %2) @@ -141,6 +147,7 @@ entry: declare @llvm.riscv.vfsub.nxv8f16.nxv8f16( , , + , iXLen); define @intrinsic_vfsub_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { @@ -151,6 +158,7 @@ define @intrinsic_vfsub_vv_nxv8f16_nxv8f16_nxv8f16( @llvm.riscv.vfsub.nxv8f16.nxv8f16( + undef, %0, %1, iXLen %2) @@ -186,6 +194,7 @@ entry: declare @llvm.riscv.vfsub.nxv16f16.nxv16f16( , , + , iXLen); define @intrinsic_vfsub_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { @@ -196,6 +205,7 @@ define @intrinsic_vfsub_vv_nxv16f16_nxv16f16_nxv16f16( @llvm.riscv.vfsub.nxv16f16.nxv16f16( + undef, %0, %1, iXLen %2) @@ -231,6 +241,7 @@ entry: declare @llvm.riscv.vfsub.nxv32f16.nxv32f16( , , + , iXLen); define @intrinsic_vfsub_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { @@ -241,6 +252,7 @@ define @intrinsic_vfsub_vv_nxv32f16_nxv32f16_nxv32f16( @llvm.riscv.vfsub.nxv32f16.nxv32f16( + undef, %0, %1, iXLen %2) @@ -277,6 +289,7 @@ entry: declare @llvm.riscv.vfsub.nxv1f32.nxv1f32( , , + , iXLen); define @intrinsic_vfsub_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { @@ -287,6 +300,7 @@ define @intrinsic_vfsub_vv_nxv1f32_nxv1f32_nxv1f32( @llvm.riscv.vfsub.nxv1f32.nxv1f32( + undef, %0, %1, iXLen %2) @@ -322,6 +336,7 @@ entry: declare @llvm.riscv.vfsub.nxv2f32.nxv2f32( , , + , iXLen); define @intrinsic_vfsub_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) 
nounwind { @@ -332,6 +347,7 @@ define @intrinsic_vfsub_vv_nxv2f32_nxv2f32_nxv2f32( @llvm.riscv.vfsub.nxv2f32.nxv2f32( + undef, %0, %1, iXLen %2) @@ -367,6 +383,7 @@ entry: declare @llvm.riscv.vfsub.nxv4f32.nxv4f32( , , + , iXLen); define @intrinsic_vfsub_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { @@ -377,6 +394,7 @@ define @intrinsic_vfsub_vv_nxv4f32_nxv4f32_nxv4f32( @llvm.riscv.vfsub.nxv4f32.nxv4f32( + undef, %0, %1, iXLen %2) @@ -412,6 +430,7 @@ entry: declare @llvm.riscv.vfsub.nxv8f32.nxv8f32( , , + , iXLen); define @intrinsic_vfsub_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { @@ -422,6 +441,7 @@ define @intrinsic_vfsub_vv_nxv8f32_nxv8f32_nxv8f32( @llvm.riscv.vfsub.nxv8f32.nxv8f32( + undef, %0, %1, iXLen %2) @@ -457,6 +477,7 @@ entry: declare @llvm.riscv.vfsub.nxv16f32.nxv16f32( , , + , iXLen); define @intrinsic_vfsub_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { @@ -467,6 +488,7 @@ define @intrinsic_vfsub_vv_nxv16f32_nxv16f32_nxv16f32( @llvm.riscv.vfsub.nxv16f32.nxv16f32( + undef, %0, %1, iXLen %2) @@ -503,6 +525,7 @@ entry: declare @llvm.riscv.vfsub.nxv1f64.nxv1f64( , , + , iXLen); define @intrinsic_vfsub_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { @@ -513,6 +536,7 @@ define @intrinsic_vfsub_vv_nxv1f64_nxv1f64_nxv1f64( @llvm.riscv.vfsub.nxv1f64.nxv1f64( + undef, %0, %1, iXLen %2) @@ -548,6 +572,7 @@ entry: declare @llvm.riscv.vfsub.nxv2f64.nxv2f64( , , + , iXLen); define @intrinsic_vfsub_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { @@ -558,6 +583,7 @@ define @intrinsic_vfsub_vv_nxv2f64_nxv2f64_nxv2f64( @llvm.riscv.vfsub.nxv2f64.nxv2f64( + undef, %0, %1, iXLen %2) @@ -593,6 +619,7 @@ entry: declare @llvm.riscv.vfsub.nxv4f64.nxv4f64( , , + , iXLen); define @intrinsic_vfsub_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { @@ -603,6 +630,7 @@ define @intrinsic_vfsub_vv_nxv4f64_nxv4f64_nxv4f64( @llvm.riscv.vfsub.nxv4f64.nxv4f64( + undef, %0, %1, iXLen %2) @@ -638,6 +666,7 @@ entry: declare @llvm.riscv.vfsub.nxv8f64.nxv8f64( , , + , iXLen); define @intrinsic_vfsub_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { @@ -648,6 +677,7 @@ define @intrinsic_vfsub_vv_nxv8f64_nxv8f64_nxv8f64( @llvm.riscv.vfsub.nxv8f64.nxv8f64( + undef, %0, %1, iXLen %2) @@ -683,6 +713,7 @@ entry: declare @llvm.riscv.vfsub.nxv1f16.f16( , + , half, iXLen); @@ -694,6 +725,7 @@ define @intrinsic_vfsub_vf_nxv1f16_nxv1f16_f16( @llvm.riscv.vfsub.nxv1f16.f16( + undef, %0, half %1, iXLen %2) @@ -728,6 +760,7 @@ entry: declare @llvm.riscv.vfsub.nxv2f16.f16( , + , half, iXLen); @@ -739,6 +772,7 @@ define @intrinsic_vfsub_vf_nxv2f16_nxv2f16_f16( @llvm.riscv.vfsub.nxv2f16.f16( + undef, %0, half %1, iXLen %2) @@ -773,6 +807,7 @@ entry: declare @llvm.riscv.vfsub.nxv4f16.f16( , + , half, iXLen); @@ -784,6 +819,7 @@ define @intrinsic_vfsub_vf_nxv4f16_nxv4f16_f16( @llvm.riscv.vfsub.nxv4f16.f16( + undef, %0, half %1, iXLen %2) @@ -818,6 +854,7 @@ entry: declare @llvm.riscv.vfsub.nxv8f16.f16( , + , half, iXLen); @@ -829,6 +866,7 @@ define @intrinsic_vfsub_vf_nxv8f16_nxv8f16_f16( @llvm.riscv.vfsub.nxv8f16.f16( + undef, %0, half %1, iXLen %2) @@ -863,6 +901,7 @@ entry: declare @llvm.riscv.vfsub.nxv16f16.f16( , + , half, iXLen); @@ -874,6 +913,7 @@ define @intrinsic_vfsub_vf_nxv16f16_nxv16f16_f16( @llvm.riscv.vfsub.nxv16f16.f16( + undef, %0, half %1, iXLen %2) @@ -908,6 +948,7 @@ entry: declare @llvm.riscv.vfsub.nxv32f16.f16( , + , half, iXLen); @@ -919,6 +960,7 @@ define @intrinsic_vfsub_vf_nxv32f16_nxv32f16_f16( @llvm.riscv.vfsub.nxv32f16.f16( + undef, %0, 
half %1, iXLen %2) @@ -953,6 +995,7 @@ entry: declare @llvm.riscv.vfsub.nxv1f32.f32( , + , float, iXLen); @@ -964,6 +1007,7 @@ define @intrinsic_vfsub_vf_nxv1f32_nxv1f32_f32( @llvm.riscv.vfsub.nxv1f32.f32( + undef, %0, float %1, iXLen %2) @@ -998,6 +1042,7 @@ entry: declare @llvm.riscv.vfsub.nxv2f32.f32( , + , float, iXLen); @@ -1009,6 +1054,7 @@ define @intrinsic_vfsub_vf_nxv2f32_nxv2f32_f32( @llvm.riscv.vfsub.nxv2f32.f32( + undef, %0, float %1, iXLen %2) @@ -1043,6 +1089,7 @@ entry: declare @llvm.riscv.vfsub.nxv4f32.f32( , + , float, iXLen); @@ -1054,6 +1101,7 @@ define @intrinsic_vfsub_vf_nxv4f32_nxv4f32_f32( @llvm.riscv.vfsub.nxv4f32.f32( + undef, %0, float %1, iXLen %2) @@ -1088,6 +1136,7 @@ entry: declare @llvm.riscv.vfsub.nxv8f32.f32( , + , float, iXLen); @@ -1099,6 +1148,7 @@ define @intrinsic_vfsub_vf_nxv8f32_nxv8f32_f32( @llvm.riscv.vfsub.nxv8f32.f32( + undef, %0, float %1, iXLen %2) @@ -1133,6 +1183,7 @@ entry: declare @llvm.riscv.vfsub.nxv16f32.f32( , + , float, iXLen); @@ -1144,6 +1195,7 @@ define @intrinsic_vfsub_vf_nxv16f32_nxv16f32_f32( @llvm.riscv.vfsub.nxv16f32.f32( + undef, %0, float %1, iXLen %2) @@ -1178,6 +1230,7 @@ entry: declare @llvm.riscv.vfsub.nxv1f64.f64( , + , double, iXLen); @@ -1189,6 +1242,7 @@ define @intrinsic_vfsub_vf_nxv1f64_nxv1f64_f64( @llvm.riscv.vfsub.nxv1f64.f64( + undef, %0, double %1, iXLen %2) @@ -1223,6 +1277,7 @@ entry: declare @llvm.riscv.vfsub.nxv2f64.f64( , + , double, iXLen); @@ -1234,6 +1289,7 @@ define @intrinsic_vfsub_vf_nxv2f64_nxv2f64_f64( @llvm.riscv.vfsub.nxv2f64.f64( + undef, %0, double %1, iXLen %2) @@ -1268,6 +1324,7 @@ entry: declare @llvm.riscv.vfsub.nxv4f64.f64( , + , double, iXLen); @@ -1279,6 +1336,7 @@ define @intrinsic_vfsub_vf_nxv4f64_nxv4f64_f64( @llvm.riscv.vfsub.nxv4f64.f64( + undef, %0, double %1, iXLen %2) @@ -1313,6 +1371,7 @@ entry: declare @llvm.riscv.vfsub.nxv8f64.f64( , + , double, iXLen); @@ -1324,6 +1383,7 @@ define @intrinsic_vfsub_vf_nxv8f64_nxv8f64_f64( @llvm.riscv.vfsub.nxv8f64.f64( + undef, %0, double %1, iXLen %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd.ll index 541f2b8..5910e79 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwadd.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd.ll @@ -4,6 +4,7 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16( + , , , iXLen); @@ -17,6 +18,7 @@ define @intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16( @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16( + undef, %0, %1, iXLen %2) @@ -50,6 +52,7 @@ entry: } declare @llvm.riscv.vfwadd.nxv2f32.nxv2f16.nxv2f16( + , , , iXLen); @@ -63,6 +66,7 @@ define @intrinsic_vfwadd_vv_nxv2f32_nxv2f16_nxv2f16( @llvm.riscv.vfwadd.nxv2f32.nxv2f16.nxv2f16( + undef, %0, %1, iXLen %2) @@ -96,6 +100,7 @@ entry: } declare @llvm.riscv.vfwadd.nxv4f32.nxv4f16.nxv4f16( + , , , iXLen); @@ -109,6 +114,7 @@ define @intrinsic_vfwadd_vv_nxv4f32_nxv4f16_nxv4f16( @llvm.riscv.vfwadd.nxv4f32.nxv4f16.nxv4f16( + undef, %0, %1, iXLen %2) @@ -142,6 +148,7 @@ entry: } declare @llvm.riscv.vfwadd.nxv8f32.nxv8f16.nxv8f16( + , , , iXLen); @@ -155,6 +162,7 @@ define @intrinsic_vfwadd_vv_nxv8f32_nxv8f16_nxv8f16( @llvm.riscv.vfwadd.nxv8f32.nxv8f16.nxv8f16( + undef, %0, %1, iXLen %2) @@ -188,6 +196,7 @@ entry: } declare @llvm.riscv.vfwadd.nxv16f32.nxv16f16.nxv16f16( + , , , iXLen); @@ -201,6 +210,7 @@ define @intrinsic_vfwadd_vv_nxv16f32_nxv16f16_nxv16f16( @llvm.riscv.vfwadd.nxv16f32.nxv16f16.nxv16f16( + undef, %0, 
%1, iXLen %2) @@ -234,6 +244,7 @@ entry: } declare @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32( + , , , iXLen); @@ -247,6 +258,7 @@ define @intrinsic_vfwadd_vv_nxv1f64_nxv1f32_nxv1f32( @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32( + undef, %0, %1, iXLen %2) @@ -280,6 +292,7 @@ entry: } declare @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32( + , , , iXLen); @@ -293,6 +306,7 @@ define @intrinsic_vfwadd_vv_nxv2f64_nxv2f32_nxv2f32( @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32( + undef, %0, %1, iXLen %2) @@ -326,6 +340,7 @@ entry: } declare @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32( + , , , iXLen); @@ -339,6 +354,7 @@ define @intrinsic_vfwadd_vv_nxv4f64_nxv4f32_nxv4f32( @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32( + undef, %0, %1, iXLen %2) @@ -372,6 +388,7 @@ entry: } declare @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32( + , , , iXLen); @@ -385,6 +402,7 @@ define @intrinsic_vfwadd_vv_nxv8f64_nxv8f32_nxv8f32( @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32( + undef, %0, %1, iXLen %2) @@ -418,6 +436,7 @@ entry: } declare @llvm.riscv.vfwadd.nxv1f32.nxv1f16.f16( + , , half, iXLen); @@ -431,6 +450,7 @@ define @intrinsic_vfwadd_vf_nxv1f32_nxv1f16_f16( @llvm.riscv.vfwadd.nxv1f32.nxv1f16.f16( + undef, %0, half %1, iXLen %2) @@ -464,6 +484,7 @@ entry: } declare @llvm.riscv.vfwadd.nxv2f32.nxv2f16.f16( + , , half, iXLen); @@ -477,6 +498,7 @@ define @intrinsic_vfwadd_vf_nxv2f32_nxv2f16_f16( @llvm.riscv.vfwadd.nxv2f32.nxv2f16.f16( + undef, %0, half %1, iXLen %2) @@ -510,6 +532,7 @@ entry: } declare @llvm.riscv.vfwadd.nxv4f32.nxv4f16.f16( + , , half, iXLen); @@ -523,6 +546,7 @@ define @intrinsic_vfwadd_vf_nxv4f32_nxv4f16_f16( @llvm.riscv.vfwadd.nxv4f32.nxv4f16.f16( + undef, %0, half %1, iXLen %2) @@ -556,6 +580,7 @@ entry: } declare @llvm.riscv.vfwadd.nxv8f32.nxv8f16.f16( + , , half, iXLen); @@ -569,6 +594,7 @@ define @intrinsic_vfwadd_vf_nxv8f32_nxv8f16_f16( @llvm.riscv.vfwadd.nxv8f32.nxv8f16.f16( + undef, %0, half %1, iXLen %2) @@ -602,6 +628,7 @@ entry: } declare @llvm.riscv.vfwadd.nxv16f32.nxv16f16.f16( + , , half, iXLen); @@ -615,6 +642,7 @@ define @intrinsic_vfwadd_vf_nxv16f32_nxv16f16_f16( @llvm.riscv.vfwadd.nxv16f32.nxv16f16.f16( + undef, %0, half %1, iXLen %2) @@ -648,6 +676,7 @@ entry: } declare @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32( + , , float, iXLen); @@ -661,6 +690,7 @@ define @intrinsic_vfwadd_vf_nxv1f64_nxv1f32_f32( @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32( + undef, %0, float %1, iXLen %2) @@ -694,6 +724,7 @@ entry: } declare @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32( + , , float, iXLen); @@ -707,6 +738,7 @@ define @intrinsic_vfwadd_vf_nxv2f64_nxv2f32_f32( @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32( + undef, %0, float %1, iXLen %2) @@ -740,6 +772,7 @@ entry: } declare @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32( + , , float, iXLen); @@ -753,6 +786,7 @@ define @intrinsic_vfwadd_vf_nxv4f64_nxv4f32_f32( @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32( + undef, %0, float %1, iXLen %2) @@ -786,6 +820,7 @@ entry: } declare @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32( + , , float, iXLen); @@ -799,6 +834,7 @@ define @intrinsic_vfwadd_vf_nxv8f64_nxv8f32_f32( @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32( + undef, %0, float %1, iXLen %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w.ll index 28cdfbf..1027e08 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w.ll @@ -5,6 +5,7 @@ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16( , + , , iXLen); @@ -16,6 +17,7 @@ define 
@intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16( @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16( + undef, %0, %1, iXLen %2) @@ -50,6 +52,7 @@ entry: declare @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16( , + , , iXLen); @@ -61,6 +64,7 @@ define @intrinsic_vfwadd.w_wv_nxv2f32_nxv2f32_nxv2f16( @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16( + undef, %0, %1, iXLen %2) @@ -95,6 +99,7 @@ entry: declare @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16( , + , , iXLen); @@ -106,6 +111,7 @@ define @intrinsic_vfwadd.w_wv_nxv4f32_nxv4f32_nxv4f16( @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16( + undef, %0, %1, iXLen %2) @@ -140,6 +146,7 @@ entry: declare @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16( , + , , iXLen); @@ -151,6 +158,7 @@ define @intrinsic_vfwadd.w_wv_nxv8f32_nxv8f32_nxv8f16( @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16( + undef, %0, %1, iXLen %2) @@ -185,6 +193,7 @@ entry: declare @llvm.riscv.vfwadd.w.nxv16f32.nxv16f16( , + , , iXLen); @@ -196,6 +205,7 @@ define @intrinsic_vfwadd.w_wv_nxv16f32_nxv16f32_nxv16f16(< ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.nxv16f32.nxv16f16( + undef, %0, %1, iXLen %2) @@ -231,6 +241,7 @@ entry: declare @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32( , + , , iXLen); @@ -242,6 +253,7 @@ define @intrinsic_vfwadd.w_wv_nxv1f64_nxv1f64_nxv1f32( @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32( + undef, %0, %1, iXLen %2) @@ -276,6 +288,7 @@ entry: declare @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32( , + , , iXLen); @@ -287,6 +300,7 @@ define @intrinsic_vfwadd.w_wv_nxv2f64_nxv2f64_nxv2f32( @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32( + undef, %0, %1, iXLen %2) @@ -321,6 +335,7 @@ entry: declare @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32( , + , , iXLen); @@ -332,6 +347,7 @@ define @intrinsic_vfwadd.w_wv_nxv4f64_nxv4f64_nxv4f32( @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32( + undef, %0, %1, iXLen %2) @@ -366,6 +382,7 @@ entry: declare @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32( , + , , iXLen); @@ -377,6 +394,7 @@ define @intrinsic_vfwadd.w_wv_nxv8f64_nxv8f64_nxv8f32( @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32( + undef, %0, %1, iXLen %2) @@ -412,6 +430,7 @@ entry: declare @llvm.riscv.vfwadd.w.nxv1f32.f16( , + , half, iXLen); @@ -423,6 +442,7 @@ define @intrinsic_vfwadd.w_wf_nxv1f32_nxv1f32_f16( @llvm.riscv.vfwadd.w.nxv1f32.f16( + undef, %0, half %1, iXLen %2) @@ -457,6 +477,7 @@ entry: declare @llvm.riscv.vfwadd.w.nxv2f32.f16( , + , half, iXLen); @@ -468,6 +489,7 @@ define @intrinsic_vfwadd.w_wf_nxv2f32_nxv2f32_f16( @llvm.riscv.vfwadd.w.nxv2f32.f16( + undef, %0, half %1, iXLen %2) @@ -502,6 +524,7 @@ entry: declare @llvm.riscv.vfwadd.w.nxv4f32.f16( , + , half, iXLen); @@ -513,6 +536,7 @@ define @intrinsic_vfwadd.w_wf_nxv4f32_nxv4f32_f16( @llvm.riscv.vfwadd.w.nxv4f32.f16( + undef, %0, half %1, iXLen %2) @@ -547,6 +571,7 @@ entry: declare @llvm.riscv.vfwadd.w.nxv8f32.f16( , + , half, iXLen); @@ -558,6 +583,7 @@ define @intrinsic_vfwadd.w_wf_nxv8f32_nxv8f32_f16( @llvm.riscv.vfwadd.w.nxv8f32.f16( + undef, %0, half %1, iXLen %2) @@ -592,6 +618,7 @@ entry: declare @llvm.riscv.vfwadd.w.nxv16f32.f16( , + , half, iXLen); @@ -603,6 +630,7 @@ define @intrinsic_vfwadd.w_wf_nxv16f32_nxv16f32_f16( @llvm.riscv.vfwadd.w.nxv16f32.f16( + undef, %0, half %1, iXLen %2) @@ -637,6 +665,7 @@ entry: declare @llvm.riscv.vfwadd.w.nxv1f64.f32( , + , float, iXLen); @@ -648,6 +677,7 @@ define @intrinsic_vfwadd.w_wf_nxv1f64_nxv1f64_f32( @llvm.riscv.vfwadd.w.nxv1f64.f32( + undef, %0, float %1, iXLen %2) @@ -682,6 +712,7 @@ entry: declare @llvm.riscv.vfwadd.w.nxv2f64.f32( , + , float, iXLen); @@ -693,6 +724,7 @@ define @intrinsic_vfwadd.w_wf_nxv2f64_nxv2f64_f32( @llvm.riscv.vfwadd.w.nxv2f64.f32( + undef, %0, 
float %1, iXLen %2) @@ -727,6 +759,7 @@ entry: declare @llvm.riscv.vfwadd.w.nxv4f64.f32( , + , float, iXLen); @@ -738,6 +771,7 @@ define @intrinsic_vfwadd.w_wf_nxv4f64_nxv4f64_f32( @llvm.riscv.vfwadd.w.nxv4f64.f32( + undef, %0, float %1, iXLen %2) @@ -772,6 +806,7 @@ entry: declare @llvm.riscv.vfwadd.w.nxv8f64.f32( , + , float, iXLen); @@ -783,6 +818,7 @@ define @intrinsic_vfwadd.w_wf_nxv8f64_nxv8f64_f32( @llvm.riscv.vfwadd.w.nxv8f64.f32( + undef, %0, float %1, iXLen %2) @@ -1130,6 +1166,7 @@ define @intrinsic_vfwadd.w_wv_untie_nxv1f32_nxv1f32_nxv1f16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16( + undef, %1, %0, iXLen %2) @@ -1146,6 +1183,7 @@ define @intrinsic_vfwadd.w_wv_untie_nxv2f32_nxv2f32_nxv2f16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16( + undef, %1, %0, iXLen %2) @@ -1162,6 +1200,7 @@ define @intrinsic_vfwadd.w_wv_untie_nxv4f32_nxv4f32_nxv4f16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16( + undef, %1, %0, iXLen %2) @@ -1178,6 +1217,7 @@ define @intrinsic_vfwadd.w_wv_untie_nxv8f32_nxv8f32_nxv8f16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16( + undef, %1, %0, iXLen %2) @@ -1194,6 +1234,7 @@ define @intrinsic_vfwadd.w_wv_untie_nxv1f64_nxv1f64_nxv1f3 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32( + undef, %1, %0, iXLen %2) @@ -1210,6 +1251,7 @@ define @intrinsic_vfwadd.w_wv_untie_nxv2f64_nxv2f64_nxv2f3 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32( + undef, %1, %0, iXLen %2) @@ -1226,6 +1268,7 @@ define @intrinsic_vfwadd.w_wv_untie_nxv4f64_nxv4f64_nxv4f3 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32( + undef, %1, %0, iXLen %2) @@ -1242,6 +1285,7 @@ define @intrinsic_vfwadd.w_wv_untie_nxv8f64_nxv8f64_nxv8f3 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32( + undef, %1, %0, iXLen %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmul.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmul.ll index b1ec846..017a9c6 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwmul.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmul.ll @@ -4,6 +4,7 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16( + , , , iXLen); @@ -17,6 +18,7 @@ define @intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16( @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16( + undef, %0, %1, iXLen %2) @@ -50,6 +52,7 @@ entry: } declare @llvm.riscv.vfwmul.nxv2f32.nxv2f16.nxv2f16( + , , , iXLen); @@ -63,6 +66,7 @@ define @intrinsic_vfwmul_vv_nxv2f32_nxv2f16_nxv2f16( @llvm.riscv.vfwmul.nxv2f32.nxv2f16.nxv2f16( + undef, %0, %1, iXLen %2) @@ -96,6 +100,7 @@ entry: } declare @llvm.riscv.vfwmul.nxv4f32.nxv4f16.nxv4f16( + , , , iXLen); @@ -109,6 +114,7 @@ define @intrinsic_vfwmul_vv_nxv4f32_nxv4f16_nxv4f16( @llvm.riscv.vfwmul.nxv4f32.nxv4f16.nxv4f16( + undef, %0, %1, iXLen %2) @@ -142,6 +148,7 @@ entry: } declare @llvm.riscv.vfwmul.nxv8f32.nxv8f16.nxv8f16( + , , , iXLen); @@ -155,6 +162,7 @@ define @intrinsic_vfwmul_vv_nxv8f32_nxv8f16_nxv8f16( @llvm.riscv.vfwmul.nxv8f32.nxv8f16.nxv8f16( + undef, %0, %1, iXLen %2) @@ -188,6 +196,7 @@ entry: } declare @llvm.riscv.vfwmul.nxv16f32.nxv16f16.nxv16f16( + , , , iXLen); @@ -201,6 +210,7 @@ define @intrinsic_vfwmul_vv_nxv16f32_nxv16f16_nxv16f16( @llvm.riscv.vfwmul.nxv16f32.nxv16f16.nxv16f16( + undef, %0, %1, iXLen %2) @@ -234,6 +244,7 @@ entry: } declare @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32( + , , , 
iXLen); @@ -247,6 +258,7 @@ define @intrinsic_vfwmul_vv_nxv1f64_nxv1f32_nxv1f32( @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32( + undef, %0, %1, iXLen %2) @@ -280,6 +292,7 @@ entry: } declare @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32( + , , , iXLen); @@ -293,6 +306,7 @@ define @intrinsic_vfwmul_vv_nxv2f64_nxv2f32_nxv2f32( @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32( + undef, %0, %1, iXLen %2) @@ -326,6 +340,7 @@ entry: } declare @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32( + , , , iXLen); @@ -339,6 +354,7 @@ define @intrinsic_vfwmul_vv_nxv4f64_nxv4f32_nxv4f32( @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32( + undef, %0, %1, iXLen %2) @@ -372,6 +388,7 @@ entry: } declare @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32( + , , , iXLen); @@ -385,6 +402,7 @@ define @intrinsic_vfwmul_vv_nxv8f64_nxv8f32_nxv8f32( @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32( + undef, %0, %1, iXLen %2) @@ -418,6 +436,7 @@ entry: } declare @llvm.riscv.vfwmul.nxv1f32.nxv1f16.f16( + , , half, iXLen); @@ -431,6 +450,7 @@ define @intrinsic_vfwmul_vf_nxv1f32_nxv1f16_f16( @llvm.riscv.vfwmul.nxv1f32.nxv1f16.f16( + undef, %0, half %1, iXLen %2) @@ -464,6 +484,7 @@ entry: } declare @llvm.riscv.vfwmul.nxv2f32.nxv2f16.f16( + , , half, iXLen); @@ -477,6 +498,7 @@ define @intrinsic_vfwmul_vf_nxv2f32_nxv2f16_f16( @llvm.riscv.vfwmul.nxv2f32.nxv2f16.f16( + undef, %0, half %1, iXLen %2) @@ -510,6 +532,7 @@ entry: } declare @llvm.riscv.vfwmul.nxv4f32.nxv4f16.f16( + , , half, iXLen); @@ -523,6 +546,7 @@ define @intrinsic_vfwmul_vf_nxv4f32_nxv4f16_f16( @llvm.riscv.vfwmul.nxv4f32.nxv4f16.f16( + undef, %0, half %1, iXLen %2) @@ -556,6 +580,7 @@ entry: } declare @llvm.riscv.vfwmul.nxv8f32.nxv8f16.f16( + , , half, iXLen); @@ -569,6 +594,7 @@ define @intrinsic_vfwmul_vf_nxv8f32_nxv8f16_f16( @llvm.riscv.vfwmul.nxv8f32.nxv8f16.f16( + undef, %0, half %1, iXLen %2) @@ -602,6 +628,7 @@ entry: } declare @llvm.riscv.vfwmul.nxv16f32.nxv16f16.f16( + , , half, iXLen); @@ -615,6 +642,7 @@ define @intrinsic_vfwmul_vf_nxv16f32_nxv16f16_f16( @llvm.riscv.vfwmul.nxv16f32.nxv16f16.f16( + undef, %0, half %1, iXLen %2) @@ -648,6 +676,7 @@ entry: } declare @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32( + , , float, iXLen); @@ -661,6 +690,7 @@ define @intrinsic_vfwmul_vf_nxv1f64_nxv1f32_f32( @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32( + undef, %0, float %1, iXLen %2) @@ -694,6 +724,7 @@ entry: } declare @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32( + , , float, iXLen); @@ -707,6 +738,7 @@ define @intrinsic_vfwmul_vf_nxv2f64_nxv2f32_f32( @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32( + undef, %0, float %1, iXLen %2) @@ -740,6 +772,7 @@ entry: } declare @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32( + , , float, iXLen); @@ -753,6 +786,7 @@ define @intrinsic_vfwmul_vf_nxv4f64_nxv4f32_f32( @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32( + undef, %0, float %1, iXLen %2) @@ -786,6 +820,7 @@ entry: } declare @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32( + , , float, iXLen); @@ -799,6 +834,7 @@ define @intrinsic_vfwmul_vf_nxv8f64_nxv8f32_f32( @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32( + undef, %0, float %1, iXLen %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub.ll index 916abca..c7ed210 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwsub.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub.ll @@ -4,6 +4,7 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16( + , , , iXLen); @@ -17,6 +18,7 @@ define @intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16( 
@llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16( + undef, %0, %1, iXLen %2) @@ -50,6 +52,7 @@ entry: } declare @llvm.riscv.vfwsub.nxv2f32.nxv2f16.nxv2f16( + , , , iXLen); @@ -63,6 +66,7 @@ define @intrinsic_vfwsub_vv_nxv2f32_nxv2f16_nxv2f16( @llvm.riscv.vfwsub.nxv2f32.nxv2f16.nxv2f16( + undef, %0, %1, iXLen %2) @@ -96,6 +100,7 @@ entry: } declare @llvm.riscv.vfwsub.nxv4f32.nxv4f16.nxv4f16( + , , , iXLen); @@ -109,6 +114,7 @@ define @intrinsic_vfwsub_vv_nxv4f32_nxv4f16_nxv4f16( @llvm.riscv.vfwsub.nxv4f32.nxv4f16.nxv4f16( + undef, %0, %1, iXLen %2) @@ -142,6 +148,7 @@ entry: } declare @llvm.riscv.vfwsub.nxv8f32.nxv8f16.nxv8f16( + , , , iXLen); @@ -155,6 +162,7 @@ define @intrinsic_vfwsub_vv_nxv8f32_nxv8f16_nxv8f16( @llvm.riscv.vfwsub.nxv8f32.nxv8f16.nxv8f16( + undef, %0, %1, iXLen %2) @@ -188,6 +196,7 @@ entry: } declare @llvm.riscv.vfwsub.nxv16f32.nxv16f16.nxv16f16( + , , , iXLen); @@ -201,6 +210,7 @@ define @intrinsic_vfwsub_vv_nxv16f32_nxv16f16_nxv16f16( @llvm.riscv.vfwsub.nxv16f32.nxv16f16.nxv16f16( + undef, %0, %1, iXLen %2) @@ -234,6 +244,7 @@ entry: } declare @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32( + , , , iXLen); @@ -247,6 +258,7 @@ define @intrinsic_vfwsub_vv_nxv1f64_nxv1f32_nxv1f32( @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32( + undef, %0, %1, iXLen %2) @@ -280,6 +292,7 @@ entry: } declare @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32( + , , , iXLen); @@ -293,6 +306,7 @@ define @intrinsic_vfwsub_vv_nxv2f64_nxv2f32_nxv2f32( @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32( + undef, %0, %1, iXLen %2) @@ -326,6 +340,7 @@ entry: } declare @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32( + , , , iXLen); @@ -339,6 +354,7 @@ define @intrinsic_vfwsub_vv_nxv4f64_nxv4f32_nxv4f32( @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32( + undef, %0, %1, iXLen %2) @@ -372,6 +388,7 @@ entry: } declare @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32( + , , , iXLen); @@ -385,6 +402,7 @@ define @intrinsic_vfwsub_vv_nxv8f64_nxv8f32_nxv8f32( @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32( + undef, %0, %1, iXLen %2) @@ -418,6 +436,7 @@ entry: } declare @llvm.riscv.vfwsub.nxv1f32.nxv1f16.f16( + , , half, iXLen); @@ -431,6 +450,7 @@ define @intrinsic_vfwsub_vf_nxv1f32_nxv1f16_f16( @llvm.riscv.vfwsub.nxv1f32.nxv1f16.f16( + undef, %0, half %1, iXLen %2) @@ -464,6 +484,7 @@ entry: } declare @llvm.riscv.vfwsub.nxv2f32.nxv2f16.f16( + , , half, iXLen); @@ -477,6 +498,7 @@ define @intrinsic_vfwsub_vf_nxv2f32_nxv2f16_f16( @llvm.riscv.vfwsub.nxv2f32.nxv2f16.f16( + undef, %0, half %1, iXLen %2) @@ -510,6 +532,7 @@ entry: } declare @llvm.riscv.vfwsub.nxv4f32.nxv4f16.f16( + , , half, iXLen); @@ -523,6 +546,7 @@ define @intrinsic_vfwsub_vf_nxv4f32_nxv4f16_f16( @llvm.riscv.vfwsub.nxv4f32.nxv4f16.f16( + undef, %0, half %1, iXLen %2) @@ -556,6 +580,7 @@ entry: } declare @llvm.riscv.vfwsub.nxv8f32.nxv8f16.f16( + , , half, iXLen); @@ -569,6 +594,7 @@ define @intrinsic_vfwsub_vf_nxv8f32_nxv8f16_f16( @llvm.riscv.vfwsub.nxv8f32.nxv8f16.f16( + undef, %0, half %1, iXLen %2) @@ -602,6 +628,7 @@ entry: } declare @llvm.riscv.vfwsub.nxv16f32.nxv16f16.f16( + , , half, iXLen); @@ -615,6 +642,7 @@ define @intrinsic_vfwsub_vf_nxv16f32_nxv16f16_f16( @llvm.riscv.vfwsub.nxv16f32.nxv16f16.f16( + undef, %0, half %1, iXLen %2) @@ -648,6 +676,7 @@ entry: } declare @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32( + , , float, iXLen); @@ -661,6 +690,7 @@ define @intrinsic_vfwsub_vf_nxv1f64_nxv1f32_f32( @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32( + undef, %0, float %1, iXLen %2) @@ -694,6 +724,7 @@ entry: } declare @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32( + , , float, iXLen); @@ -707,6 +738,7 
@@ define @intrinsic_vfwsub_vf_nxv2f64_nxv2f32_f32( @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32( + undef, %0, float %1, iXLen %2) @@ -740,6 +772,7 @@ entry: } declare @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32( + , , float, iXLen); @@ -753,6 +786,7 @@ define @intrinsic_vfwsub_vf_nxv4f64_nxv4f32_f32( @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32( + undef, %0, float %1, iXLen %2) @@ -786,6 +820,7 @@ entry: } declare @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32( + , , float, iXLen); @@ -799,6 +834,7 @@ define @intrinsic_vfwsub_vf_nxv8f64_nxv8f32_f32( @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32( + undef, %0, float %1, iXLen %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w.ll index b5d008c..1b20551 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w.ll @@ -5,6 +5,7 @@ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16( , + , , iXLen); @@ -16,6 +17,7 @@ define @intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16( @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16( + undef, %0, %1, iXLen %2) @@ -50,6 +52,7 @@ entry: declare @llvm.riscv.vfwsub.w.nxv2f32.nxv2f16( , + , , iXLen); @@ -61,6 +64,7 @@ define @intrinsic_vfwsub.w_wv_nxv2f32_nxv2f32_nxv2f16( @llvm.riscv.vfwsub.w.nxv2f32.nxv2f16( + undef, %0, %1, iXLen %2) @@ -95,6 +99,7 @@ entry: declare @llvm.riscv.vfwsub.w.nxv4f32.nxv4f16( , + , , iXLen); @@ -106,6 +111,7 @@ define @intrinsic_vfwsub.w_wv_nxv4f32_nxv4f32_nxv4f16( @llvm.riscv.vfwsub.w.nxv4f32.nxv4f16( + undef, %0, %1, iXLen %2) @@ -140,6 +146,7 @@ entry: declare @llvm.riscv.vfwsub.w.nxv8f32.nxv8f16( , + , , iXLen); @@ -151,6 +158,7 @@ define @intrinsic_vfwsub.w_wv_nxv8f32_nxv8f32_nxv8f16( @llvm.riscv.vfwsub.w.nxv8f32.nxv8f16( + undef, %0, %1, iXLen %2) @@ -185,6 +193,7 @@ entry: declare @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16( , + , , iXLen); @@ -196,6 +205,7 @@ define @intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16(< ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16( + undef, %0, %1, iXLen %2) @@ -231,6 +241,7 @@ entry: declare @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32( , + , , iXLen); @@ -242,6 +253,7 @@ define @intrinsic_vfwsub.w_wv_nxv1f64_nxv1f64_nxv1f32( @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32( + undef, %0, %1, iXLen %2) @@ -276,6 +288,7 @@ entry: declare @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32( , + , , iXLen); @@ -287,6 +300,7 @@ define @intrinsic_vfwsub.w_wv_nxv2f64_nxv2f64_nxv2f32( @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32( + undef, %0, %1, iXLen %2) @@ -321,6 +335,7 @@ entry: declare @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32( , + , , iXLen); @@ -332,6 +347,7 @@ define @intrinsic_vfwsub.w_wv_nxv4f64_nxv4f64_nxv4f32( @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32( + undef, %0, %1, iXLen %2) @@ -366,6 +382,7 @@ entry: declare @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32( , + , , iXLen); @@ -377,6 +394,7 @@ define @intrinsic_vfwsub.w_wv_nxv8f64_nxv8f64_nxv8f32( @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32( + undef, %0, %1, iXLen %2) @@ -412,6 +430,7 @@ entry: declare @llvm.riscv.vfwsub.w.nxv1f32.f16( , + , half, iXLen); @@ -423,6 +442,7 @@ define @intrinsic_vfwsub.w_wf_nxv1f32_nxv1f32_f16( @llvm.riscv.vfwsub.w.nxv1f32.f16( + undef, %0, half %1, iXLen %2) @@ -457,6 +477,7 @@ entry: declare @llvm.riscv.vfwsub.w.nxv2f32.f16( , + , half, iXLen); @@ -468,6 +489,7 @@ define @intrinsic_vfwsub.w_wf_nxv2f32_nxv2f32_f16( @llvm.riscv.vfwsub.w.nxv2f32.f16( + undef, %0, half %1, iXLen %2) @@ -502,6 +524,7 @@ entry: declare @llvm.riscv.vfwsub.w.nxv4f32.f16( , + , half, iXLen); @@ -513,6 +536,7 @@ define 
@intrinsic_vfwsub.w_wf_nxv4f32_nxv4f32_f16( @llvm.riscv.vfwsub.w.nxv4f32.f16( + undef, %0, half %1, iXLen %2) @@ -547,6 +571,7 @@ entry: declare @llvm.riscv.vfwsub.w.nxv8f32.f16( , + , half, iXLen); @@ -558,6 +583,7 @@ define @intrinsic_vfwsub.w_wf_nxv8f32_nxv8f32_f16( @llvm.riscv.vfwsub.w.nxv8f32.f16( + undef, %0, half %1, iXLen %2) @@ -592,6 +618,7 @@ entry: declare @llvm.riscv.vfwsub.w.nxv16f32.f16( , + , half, iXLen); @@ -603,6 +630,7 @@ define @intrinsic_vfwsub.w_wf_nxv16f32_nxv16f32_f16( @llvm.riscv.vfwsub.w.nxv16f32.f16( + undef, %0, half %1, iXLen %2) @@ -637,6 +665,7 @@ entry: declare @llvm.riscv.vfwsub.w.nxv1f64.f32( , + , float, iXLen); @@ -648,6 +677,7 @@ define @intrinsic_vfwsub.w_wf_nxv1f64_nxv1f64_f32( @llvm.riscv.vfwsub.w.nxv1f64.f32( + undef, %0, float %1, iXLen %2) @@ -682,6 +712,7 @@ entry: declare @llvm.riscv.vfwsub.w.nxv2f64.f32( , + , float, iXLen); @@ -693,6 +724,7 @@ define @intrinsic_vfwsub.w_wf_nxv2f64_nxv2f64_f32( @llvm.riscv.vfwsub.w.nxv2f64.f32( + undef, %0, float %1, iXLen %2) @@ -727,6 +759,7 @@ entry: declare @llvm.riscv.vfwsub.w.nxv4f64.f32( , + , float, iXLen); @@ -738,6 +771,7 @@ define @intrinsic_vfwsub.w_wf_nxv4f64_nxv4f64_f32( @llvm.riscv.vfwsub.w.nxv4f64.f32( + undef, %0, float %1, iXLen %2) @@ -772,6 +806,7 @@ entry: declare @llvm.riscv.vfwsub.w.nxv8f64.f32( , + , float, iXLen); @@ -783,6 +818,7 @@ define @intrinsic_vfwsub.w_wf_nxv8f64_nxv8f64_f32( @llvm.riscv.vfwsub.w.nxv8f64.f32( + undef, %0, float %1, iXLen %2) @@ -1130,6 +1166,7 @@ define @intrinsic_vfwsub.w_wv_untie_nxv1f32_nxv1f32_nxv1f16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16( + undef, %1, %0, iXLen %2) @@ -1146,6 +1183,7 @@ define @intrinsic_vfwsub.w_wv_untie_nxv2f32_nxv2f32_nxv2f16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.w.nxv2f32.nxv2f16( + undef, %1, %0, iXLen %2) @@ -1162,6 +1200,7 @@ define @intrinsic_vfwsub.w_wv_untie_nxv4f32_nxv4f32_nxv4f16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.w.nxv4f32.nxv4f16( + undef, %1, %0, iXLen %2) @@ -1178,6 +1217,7 @@ define @intrinsic_vfwsub.w_wv_untie_nxv8f32_nxv8f32_nxv8f16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.w.nxv8f32.nxv8f16( + undef, %1, %0, iXLen %2) @@ -1194,6 +1234,7 @@ define @intrinsic_vfwsub.w_wv_untie_nxv1f64_nxv1f64_nxv1f3 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32( + undef, %1, %0, iXLen %2) @@ -1210,6 +1251,7 @@ define @intrinsic_vfwsub.w_wv_untie_nxv2f64_nxv2f64_nxv2f3 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32( + undef, %1, %0, iXLen %2) @@ -1226,6 +1268,7 @@ define @intrinsic_vfwsub.w_wv_untie_nxv4f64_nxv4f64_nxv4f3 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32( + undef, %1, %0, iXLen %2) @@ -1242,6 +1285,7 @@ define @intrinsic_vfwsub.w_wv_untie_nxv8f64_nxv8f64_nxv8f3 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32( + undef, %1, %0, iXLen %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll index 1d51eb6..2f431ab 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll @@ -4,6 +4,7 @@ declare @llvm.riscv.vmax.nxv1i8.nxv1i8( , , + , i32); define @intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { @@ -14,6 +15,7 @@ define @intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vmax.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -49,6 +51,7 @@ entry: declare @llvm.riscv.vmax.nxv2i8.nxv2i8( , , + , i32); define @intrinsic_vmax_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, 
i32 %2) nounwind { @@ -59,6 +62,7 @@ define @intrinsic_vmax_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vmax.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -94,6 +98,7 @@ entry: declare @llvm.riscv.vmax.nxv4i8.nxv4i8( , , + , i32); define @intrinsic_vmax_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { @@ -104,6 +109,7 @@ define @intrinsic_vmax_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vmax.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -139,6 +145,7 @@ entry: declare @llvm.riscv.vmax.nxv8i8.nxv8i8( , , + , i32); define @intrinsic_vmax_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { @@ -149,6 +156,7 @@ define @intrinsic_vmax_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vmax.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -184,6 +192,7 @@ entry: declare @llvm.riscv.vmax.nxv16i8.nxv16i8( , , + , i32); define @intrinsic_vmax_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { @@ -194,6 +203,7 @@ define @intrinsic_vmax_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vmax.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -229,6 +239,7 @@ entry: declare @llvm.riscv.vmax.nxv32i8.nxv32i8( , , + , i32); define @intrinsic_vmax_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { @@ -239,6 +250,7 @@ define @intrinsic_vmax_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vmax.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -274,6 +286,7 @@ entry: declare @llvm.riscv.vmax.nxv64i8.nxv64i8( , , + , i32); define @intrinsic_vmax_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { @@ -284,6 +297,7 @@ define @intrinsic_vmax_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vmax.nxv64i8.nxv64i8( + undef, %0, %1, i32 %2) @@ -320,6 +334,7 @@ entry: declare @llvm.riscv.vmax.nxv1i16.nxv1i16( , , + , i32); define @intrinsic_vmax_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { @@ -330,6 +345,7 @@ define @intrinsic_vmax_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vmax.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -365,6 +381,7 @@ entry: declare @llvm.riscv.vmax.nxv2i16.nxv2i16( , , + , i32); define @intrinsic_vmax_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { @@ -375,6 +392,7 @@ define @intrinsic_vmax_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vmax.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -410,6 +428,7 @@ entry: declare @llvm.riscv.vmax.nxv4i16.nxv4i16( , , + , i32); define @intrinsic_vmax_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { @@ -420,6 +439,7 @@ define @intrinsic_vmax_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vmax.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -455,6 +475,7 @@ entry: declare @llvm.riscv.vmax.nxv8i16.nxv8i16( , , + , i32); define @intrinsic_vmax_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { @@ -465,6 +486,7 @@ define @intrinsic_vmax_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vmax.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vmax.nxv16i16.nxv16i16( , , + , i32); define @intrinsic_vmax_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { @@ -510,6 +533,7 @@ define @intrinsic_vmax_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vmax.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vmax.nxv32i16.nxv32i16( , , + , i32); define @intrinsic_vmax_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { @@ -555,6 +580,7 @@ define @intrinsic_vmax_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vmax.nxv32i16.nxv32i16( + undef, %0, %1, i32 %2) @@ -591,6 +617,7 @@ entry: declare @llvm.riscv.vmax.nxv1i32.nxv1i32( , , + , i32); define @intrinsic_vmax_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { @@ -601,6 +628,7 @@ define 
@intrinsic_vmax_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vmax.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -636,6 +664,7 @@ entry: declare @llvm.riscv.vmax.nxv2i32.nxv2i32( , , + , i32); define @intrinsic_vmax_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { @@ -646,6 +675,7 @@ define @intrinsic_vmax_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vmax.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vmax.nxv4i32.nxv4i32( , , + , i32); define @intrinsic_vmax_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { @@ -691,6 +722,7 @@ define @intrinsic_vmax_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vmax.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vmax.nxv8i32.nxv8i32( , , + , i32); define @intrinsic_vmax_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { @@ -736,6 +769,7 @@ define @intrinsic_vmax_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vmax.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vmax.nxv16i32.nxv16i32( , , + , i32); define @intrinsic_vmax_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { @@ -781,6 +816,7 @@ define @intrinsic_vmax_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vmax.nxv16i32.nxv16i32( + undef, %0, %1, i32 %2) @@ -817,6 +853,7 @@ entry: declare @llvm.riscv.vmax.nxv1i64.nxv1i64( , , + , i32); define @intrinsic_vmax_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { @@ -827,6 +864,7 @@ define @intrinsic_vmax_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vmax.nxv1i64.nxv1i64( + undef, %0, %1, i32 %2) @@ -862,6 +900,7 @@ entry: declare @llvm.riscv.vmax.nxv2i64.nxv2i64( , , + , i32); define @intrinsic_vmax_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { @@ -872,6 +911,7 @@ define @intrinsic_vmax_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vmax.nxv2i64.nxv2i64( + undef, %0, %1, i32 %2) @@ -907,6 +947,7 @@ entry: declare @llvm.riscv.vmax.nxv4i64.nxv4i64( , , + , i32); define @intrinsic_vmax_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { @@ -917,6 +958,7 @@ define @intrinsic_vmax_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vmax.nxv4i64.nxv4i64( + undef, %0, %1, i32 %2) @@ -952,6 +994,7 @@ entry: declare @llvm.riscv.vmax.nxv8i64.nxv8i64( , , + , i32); define @intrinsic_vmax_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { @@ -962,6 +1005,7 @@ define @intrinsic_vmax_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vmax.nxv8i64.nxv8i64( + undef, %0, %1, i32 %2) @@ -997,6 +1041,7 @@ entry: declare @llvm.riscv.vmax.nxv1i8.i8( , + , i8, i32); @@ -1008,6 +1053,7 @@ define @intrinsic_vmax_vx_nxv1i8_nxv1i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmax.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1042,6 +1088,7 @@ entry: declare @llvm.riscv.vmax.nxv2i8.i8( , + , i8, i32); @@ -1053,6 +1100,7 @@ define @intrinsic_vmax_vx_nxv2i8_nxv2i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmax.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1087,6 +1135,7 @@ entry: declare @llvm.riscv.vmax.nxv4i8.i8( , + , i8, i32); @@ -1098,6 +1147,7 @@ define @intrinsic_vmax_vx_nxv4i8_nxv4i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmax.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1132,6 +1182,7 @@ entry: declare @llvm.riscv.vmax.nxv8i8.i8( , + , i8, i32); @@ -1143,6 +1194,7 @@ define @intrinsic_vmax_vx_nxv8i8_nxv8i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmax.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1177,6 +1229,7 @@ entry: declare @llvm.riscv.vmax.nxv16i8.i8( , + , i8, i32); @@ -1188,6 +1241,7 @@ define @intrinsic_vmax_vx_nxv16i8_nxv16i8_i8( 
@llvm.riscv.vmax.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1222,6 +1276,7 @@ entry: declare @llvm.riscv.vmax.nxv32i8.i8( , + , i8, i32); @@ -1233,6 +1288,7 @@ define @intrinsic_vmax_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vmax.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1267,6 +1323,7 @@ entry: declare @llvm.riscv.vmax.nxv64i8.i8( , + , i8, i32); @@ -1278,6 +1335,7 @@ define @intrinsic_vmax_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vmax.nxv64i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1312,6 +1370,7 @@ entry: declare @llvm.riscv.vmax.nxv1i16.i16( , + , i16, i32); @@ -1323,6 +1382,7 @@ define @intrinsic_vmax_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vmax.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1357,6 +1417,7 @@ entry: declare @llvm.riscv.vmax.nxv2i16.i16( , + , i16, i32); @@ -1368,6 +1429,7 @@ define @intrinsic_vmax_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vmax.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1402,6 +1464,7 @@ entry: declare @llvm.riscv.vmax.nxv4i16.i16( , + , i16, i32); @@ -1413,6 +1476,7 @@ define @intrinsic_vmax_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vmax.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1447,6 +1511,7 @@ entry: declare @llvm.riscv.vmax.nxv8i16.i16( , + , i16, i32); @@ -1458,6 +1523,7 @@ define @intrinsic_vmax_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vmax.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1492,6 +1558,7 @@ entry: declare @llvm.riscv.vmax.nxv16i16.i16( , + , i16, i32); @@ -1503,6 +1570,7 @@ define @intrinsic_vmax_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vmax.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1537,6 +1605,7 @@ entry: declare @llvm.riscv.vmax.nxv32i16.i16( , + , i16, i32); @@ -1548,6 +1617,7 @@ define @intrinsic_vmax_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vmax.nxv32i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1582,6 +1652,7 @@ entry: declare @llvm.riscv.vmax.nxv1i32.i32( , + , i32, i32); @@ -1593,6 +1664,7 @@ define @intrinsic_vmax_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vmax.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1627,6 +1699,7 @@ entry: declare @llvm.riscv.vmax.nxv2i32.i32( , + , i32, i32); @@ -1638,6 +1711,7 @@ define @intrinsic_vmax_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vmax.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1672,6 +1746,7 @@ entry: declare @llvm.riscv.vmax.nxv4i32.i32( , + , i32, i32); @@ -1683,6 +1758,7 @@ define @intrinsic_vmax_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vmax.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1717,6 +1793,7 @@ entry: declare @llvm.riscv.vmax.nxv8i32.i32( , + , i32, i32); @@ -1728,6 +1805,7 @@ define @intrinsic_vmax_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vmax.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1762,6 +1840,7 @@ entry: declare @llvm.riscv.vmax.nxv16i32.i32( , + , i32, i32); @@ -1773,6 +1852,7 @@ define @intrinsic_vmax_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vmax.nxv16i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1807,6 +1887,7 @@ entry: declare @llvm.riscv.vmax.nxv1i64.i64( , + , i64, i32); @@ -1824,6 +1905,7 @@ define @intrinsic_vmax_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vmax.nxv1i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1864,6 +1946,7 @@ entry: declare @llvm.riscv.vmax.nxv2i64.i64( , + , i64, i32); @@ -1881,6 +1964,7 @@ define @intrinsic_vmax_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vmax.nxv2i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1921,6 +2005,7 @@ entry: declare @llvm.riscv.vmax.nxv4i64.i64( , + , i64, i32); @@ -1938,6 +2023,7 @@ define @intrinsic_vmax_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vmax.nxv4i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1978,6 +2064,7 @@ entry: declare @llvm.riscv.vmax.nxv8i64.i64( , + , i64, i32); @@ -1995,6 +2082,7 
@@ define @intrinsic_vmax_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vmax.nxv8i64.i64( + undef, %0, i64 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll index da40c60..49f63fa 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll @@ -4,6 +4,7 @@ declare @llvm.riscv.vmax.nxv1i8.nxv1i8( , , + , i64); define @intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { @@ -14,6 +15,7 @@ define @intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vmax.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -49,6 +51,7 @@ entry: declare @llvm.riscv.vmax.nxv2i8.nxv2i8( , , + , i64); define @intrinsic_vmax_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { @@ -59,6 +62,7 @@ define @intrinsic_vmax_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vmax.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -94,6 +98,7 @@ entry: declare @llvm.riscv.vmax.nxv4i8.nxv4i8( , , + , i64); define @intrinsic_vmax_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { @@ -104,6 +109,7 @@ define @intrinsic_vmax_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vmax.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -139,6 +145,7 @@ entry: declare @llvm.riscv.vmax.nxv8i8.nxv8i8( , , + , i64); define @intrinsic_vmax_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { @@ -149,6 +156,7 @@ define @intrinsic_vmax_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vmax.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -184,6 +192,7 @@ entry: declare @llvm.riscv.vmax.nxv16i8.nxv16i8( , , + , i64); define @intrinsic_vmax_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { @@ -194,6 +203,7 @@ define @intrinsic_vmax_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vmax.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -229,6 +239,7 @@ entry: declare @llvm.riscv.vmax.nxv32i8.nxv32i8( , , + , i64); define @intrinsic_vmax_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { @@ -239,6 +250,7 @@ define @intrinsic_vmax_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vmax.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -274,6 +286,7 @@ entry: declare @llvm.riscv.vmax.nxv64i8.nxv64i8( , , + , i64); define @intrinsic_vmax_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { @@ -284,6 +297,7 @@ define @intrinsic_vmax_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vmax.nxv64i8.nxv64i8( + undef, %0, %1, i64 %2) @@ -320,6 +334,7 @@ entry: declare @llvm.riscv.vmax.nxv1i16.nxv1i16( , , + , i64); define @intrinsic_vmax_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { @@ -330,6 +345,7 @@ define @intrinsic_vmax_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vmax.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -365,6 +381,7 @@ entry: declare @llvm.riscv.vmax.nxv2i16.nxv2i16( , , + , i64); define @intrinsic_vmax_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { @@ -375,6 +392,7 @@ define @intrinsic_vmax_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vmax.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -410,6 +428,7 @@ entry: declare @llvm.riscv.vmax.nxv4i16.nxv4i16( , , + , i64); define @intrinsic_vmax_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { @@ -420,6 +439,7 @@ define @intrinsic_vmax_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vmax.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -455,6 +475,7 @@ entry: declare @llvm.riscv.vmax.nxv8i16.nxv8i16( , , + , i64); define @intrinsic_vmax_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { @@ -465,6 +486,7 @@ define @intrinsic_vmax_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vmax.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vmax.nxv16i16.nxv16i16( , , + , 
i64); define @intrinsic_vmax_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { @@ -510,6 +533,7 @@ define @intrinsic_vmax_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vmax.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vmax.nxv32i16.nxv32i16( , , + , i64); define @intrinsic_vmax_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { @@ -555,6 +580,7 @@ define @intrinsic_vmax_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vmax.nxv32i16.nxv32i16( + undef, %0, %1, i64 %2) @@ -591,6 +617,7 @@ entry: declare @llvm.riscv.vmax.nxv1i32.nxv1i32( , , + , i64); define @intrinsic_vmax_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { @@ -601,6 +628,7 @@ define @intrinsic_vmax_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vmax.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -636,6 +664,7 @@ entry: declare @llvm.riscv.vmax.nxv2i32.nxv2i32( , , + , i64); define @intrinsic_vmax_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { @@ -646,6 +675,7 @@ define @intrinsic_vmax_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vmax.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vmax.nxv4i32.nxv4i32( , , + , i64); define @intrinsic_vmax_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { @@ -691,6 +722,7 @@ define @intrinsic_vmax_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vmax.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vmax.nxv8i32.nxv8i32( , , + , i64); define @intrinsic_vmax_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { @@ -736,6 +769,7 @@ define @intrinsic_vmax_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vmax.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vmax.nxv16i32.nxv16i32( , , + , i64); define @intrinsic_vmax_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { @@ -781,6 +816,7 @@ define @intrinsic_vmax_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vmax.nxv16i32.nxv16i32( + undef, %0, %1, i64 %2) @@ -817,6 +853,7 @@ entry: declare @llvm.riscv.vmax.nxv1i64.nxv1i64( , , + , i64); define @intrinsic_vmax_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { @@ -827,6 +864,7 @@ define @intrinsic_vmax_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vmax.nxv1i64.nxv1i64( + undef, %0, %1, i64 %2) @@ -862,6 +900,7 @@ entry: declare @llvm.riscv.vmax.nxv2i64.nxv2i64( , , + , i64); define @intrinsic_vmax_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { @@ -872,6 +911,7 @@ define @intrinsic_vmax_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vmax.nxv2i64.nxv2i64( + undef, %0, %1, i64 %2) @@ -907,6 +947,7 @@ entry: declare @llvm.riscv.vmax.nxv4i64.nxv4i64( , , + , i64); define @intrinsic_vmax_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { @@ -917,6 +958,7 @@ define @intrinsic_vmax_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vmax.nxv4i64.nxv4i64( + undef, %0, %1, i64 %2) @@ -952,6 +994,7 @@ entry: declare @llvm.riscv.vmax.nxv8i64.nxv8i64( , , + , i64); define @intrinsic_vmax_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { @@ -962,6 +1005,7 @@ define @intrinsic_vmax_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vmax.nxv8i64.nxv8i64( + undef, %0, %1, i64 %2) @@ -997,6 +1041,7 @@ entry: declare @llvm.riscv.vmax.nxv1i8.i8( , + , i8, i64); @@ -1008,6 +1053,7 @@ define @intrinsic_vmax_vx_nxv1i8_nxv1i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmax.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1042,6 +1088,7 @@ entry: declare @llvm.riscv.vmax.nxv2i8.i8( , + , i8, i64); @@ -1053,6 +1100,7 @@ define @intrinsic_vmax_vx_nxv2i8_nxv2i8_i8( % ; CHECK-NEXT: 
ret entry: %a = call @llvm.riscv.vmax.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1087,6 +1135,7 @@ entry: declare @llvm.riscv.vmax.nxv4i8.i8( , + , i8, i64); @@ -1098,6 +1147,7 @@ define @intrinsic_vmax_vx_nxv4i8_nxv4i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmax.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1132,6 +1182,7 @@ entry: declare @llvm.riscv.vmax.nxv8i8.i8( , + , i8, i64); @@ -1143,6 +1194,7 @@ define @intrinsic_vmax_vx_nxv8i8_nxv8i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmax.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1177,6 +1229,7 @@ entry: declare @llvm.riscv.vmax.nxv16i8.i8( , + , i8, i64); @@ -1188,6 +1241,7 @@ define @intrinsic_vmax_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vmax.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1222,6 +1276,7 @@ entry: declare @llvm.riscv.vmax.nxv32i8.i8( , + , i8, i64); @@ -1233,6 +1288,7 @@ define @intrinsic_vmax_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vmax.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1267,6 +1323,7 @@ entry: declare @llvm.riscv.vmax.nxv64i8.i8( , + , i8, i64); @@ -1278,6 +1335,7 @@ define @intrinsic_vmax_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vmax.nxv64i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1312,6 +1370,7 @@ entry: declare @llvm.riscv.vmax.nxv1i16.i16( , + , i16, i64); @@ -1323,6 +1382,7 @@ define @intrinsic_vmax_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vmax.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1357,6 +1417,7 @@ entry: declare @llvm.riscv.vmax.nxv2i16.i16( , + , i16, i64); @@ -1368,6 +1429,7 @@ define @intrinsic_vmax_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vmax.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1402,6 +1464,7 @@ entry: declare @llvm.riscv.vmax.nxv4i16.i16( , + , i16, i64); @@ -1413,6 +1476,7 @@ define @intrinsic_vmax_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vmax.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1447,6 +1511,7 @@ entry: declare @llvm.riscv.vmax.nxv8i16.i16( , + , i16, i64); @@ -1458,6 +1523,7 @@ define @intrinsic_vmax_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vmax.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1492,6 +1558,7 @@ entry: declare @llvm.riscv.vmax.nxv16i16.i16( , + , i16, i64); @@ -1503,6 +1570,7 @@ define @intrinsic_vmax_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vmax.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1537,6 +1605,7 @@ entry: declare @llvm.riscv.vmax.nxv32i16.i16( , + , i16, i64); @@ -1548,6 +1617,7 @@ define @intrinsic_vmax_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vmax.nxv32i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1582,6 +1652,7 @@ entry: declare @llvm.riscv.vmax.nxv1i32.i32( , + , i32, i64); @@ -1593,6 +1664,7 @@ define @intrinsic_vmax_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vmax.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1627,6 +1699,7 @@ entry: declare @llvm.riscv.vmax.nxv2i32.i32( , + , i32, i64); @@ -1638,6 +1711,7 @@ define @intrinsic_vmax_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vmax.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1672,6 +1746,7 @@ entry: declare @llvm.riscv.vmax.nxv4i32.i32( , + , i32, i64); @@ -1683,6 +1758,7 @@ define @intrinsic_vmax_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vmax.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1717,6 +1793,7 @@ entry: declare @llvm.riscv.vmax.nxv8i32.i32( , + , i32, i64); @@ -1728,6 +1805,7 @@ define @intrinsic_vmax_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vmax.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1762,6 +1840,7 @@ entry: declare @llvm.riscv.vmax.nxv16i32.i32( , + , i32, i64); @@ -1773,6 +1852,7 @@ define @intrinsic_vmax_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vmax.nxv16i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1807,6 +1887,7 @@ entry: 
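; NOTE (illustrative sketch, not one of the checked-in tests): every hunk in
; these vmax/vmaxu test files applies the same mechanical change -- each nomask
; declaration gains a leading passthru vector parameter, and every existing
; call passes undef for it. The flattened diff above lost the <vscale x N x ...>
; types, so the ones below are reconstructed from the intrinsic name mangling
; (nxv1i8 = <vscale x 1 x i8>); @sketch_vmax_vx is a hypothetical name used only
; for this sketch (vmax-rv64.ll uses i64 for the vl argument).
declare <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.i8(
  <vscale x 1 x i8>,   ; new passthru operand (same type as the result)
  <vscale x 1 x i8>,   ; vector source
  i8,                  ; scalar source
  i64);                ; vl

define <vscale x 1 x i8> @sketch_vmax_vx(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.i8(
    <vscale x 1 x i8> undef,   ; undef passthru, as passed by all updated nomask tests
    <vscale x 1 x i8> %0,
    i8 %1,
    i64 %2)
  ret <vscale x 1 x i8> %a
}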
declare @llvm.riscv.vmax.nxv1i64.i64( , + , i64, i64); @@ -1818,6 +1899,7 @@ define @intrinsic_vmax_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vmax.nxv1i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1852,6 +1934,7 @@ entry: declare @llvm.riscv.vmax.nxv2i64.i64( , + , i64, i64); @@ -1863,6 +1946,7 @@ define @intrinsic_vmax_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vmax.nxv2i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1897,6 +1981,7 @@ entry: declare @llvm.riscv.vmax.nxv4i64.i64( , + , i64, i64); @@ -1908,6 +1993,7 @@ define @intrinsic_vmax_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vmax.nxv4i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1942,6 +2028,7 @@ entry: declare @llvm.riscv.vmax.nxv8i64.i64( , + , i64, i64); @@ -1953,6 +2040,7 @@ define @intrinsic_vmax_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vmax.nxv8i64.i64( + undef, %0, i64 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll index af080ef..9523f19 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll @@ -4,6 +4,7 @@ declare @llvm.riscv.vmaxu.nxv1i8.nxv1i8( , , + , i32); define @intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { @@ -14,6 +15,7 @@ define @intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vmaxu.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -49,6 +51,7 @@ entry: declare @llvm.riscv.vmaxu.nxv2i8.nxv2i8( , , + , i32); define @intrinsic_vmaxu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { @@ -59,6 +62,7 @@ define @intrinsic_vmaxu_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vmaxu.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -94,6 +98,7 @@ entry: declare @llvm.riscv.vmaxu.nxv4i8.nxv4i8( , , + , i32); define @intrinsic_vmaxu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { @@ -104,6 +109,7 @@ define @intrinsic_vmaxu_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vmaxu.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -139,6 +145,7 @@ entry: declare @llvm.riscv.vmaxu.nxv8i8.nxv8i8( , , + , i32); define @intrinsic_vmaxu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { @@ -149,6 +156,7 @@ define @intrinsic_vmaxu_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vmaxu.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -184,6 +192,7 @@ entry: declare @llvm.riscv.vmaxu.nxv16i8.nxv16i8( , , + , i32); define @intrinsic_vmaxu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { @@ -194,6 +203,7 @@ define @intrinsic_vmaxu_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vmaxu.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -229,6 +239,7 @@ entry: declare @llvm.riscv.vmaxu.nxv32i8.nxv32i8( , , + , i32); define @intrinsic_vmaxu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { @@ -239,6 +250,7 @@ define @intrinsic_vmaxu_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vmaxu.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -274,6 +286,7 @@ entry: declare @llvm.riscv.vmaxu.nxv64i8.nxv64i8( , , + , i32); define @intrinsic_vmaxu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { @@ -284,6 +297,7 @@ define @intrinsic_vmaxu_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vmaxu.nxv64i8.nxv64i8( + undef, %0, %1, i32 %2) @@ -320,6 +334,7 @@ entry: declare @llvm.riscv.vmaxu.nxv1i16.nxv1i16( , , + , i32); define @intrinsic_vmaxu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { @@ -330,6 +345,7 @@ define @intrinsic_vmaxu_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vmaxu.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -365,6 +381,7 @@ entry: declare @llvm.riscv.vmaxu.nxv2i16.nxv2i16( , , + , i32); define @intrinsic_vmaxu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { @@ -375,6 +392,7 @@ define 
@intrinsic_vmaxu_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vmaxu.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -410,6 +428,7 @@ entry: declare @llvm.riscv.vmaxu.nxv4i16.nxv4i16( , , + , i32); define @intrinsic_vmaxu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { @@ -420,6 +439,7 @@ define @intrinsic_vmaxu_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vmaxu.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -455,6 +475,7 @@ entry: declare @llvm.riscv.vmaxu.nxv8i16.nxv8i16( , , + , i32); define @intrinsic_vmaxu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { @@ -465,6 +486,7 @@ define @intrinsic_vmaxu_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vmaxu.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vmaxu.nxv16i16.nxv16i16( , , + , i32); define @intrinsic_vmaxu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { @@ -510,6 +533,7 @@ define @intrinsic_vmaxu_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vmaxu.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vmaxu.nxv32i16.nxv32i16( , , + , i32); define @intrinsic_vmaxu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { @@ -555,6 +580,7 @@ define @intrinsic_vmaxu_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vmaxu.nxv32i16.nxv32i16( + undef, %0, %1, i32 %2) @@ -591,6 +617,7 @@ entry: declare @llvm.riscv.vmaxu.nxv1i32.nxv1i32( , , + , i32); define @intrinsic_vmaxu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { @@ -601,6 +628,7 @@ define @intrinsic_vmaxu_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vmaxu.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -636,6 +664,7 @@ entry: declare @llvm.riscv.vmaxu.nxv2i32.nxv2i32( , , + , i32); define @intrinsic_vmaxu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { @@ -646,6 +675,7 @@ define @intrinsic_vmaxu_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vmaxu.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vmaxu.nxv4i32.nxv4i32( , , + , i32); define @intrinsic_vmaxu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { @@ -691,6 +722,7 @@ define @intrinsic_vmaxu_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vmaxu.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vmaxu.nxv8i32.nxv8i32( , , + , i32); define @intrinsic_vmaxu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { @@ -736,6 +769,7 @@ define @intrinsic_vmaxu_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vmaxu.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vmaxu.nxv16i32.nxv16i32( , , + , i32); define @intrinsic_vmaxu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { @@ -781,6 +816,7 @@ define @intrinsic_vmaxu_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vmaxu.nxv16i32.nxv16i32( + undef, %0, %1, i32 %2) @@ -817,6 +853,7 @@ entry: declare @llvm.riscv.vmaxu.nxv1i64.nxv1i64( , , + , i32); define @intrinsic_vmaxu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { @@ -827,6 +864,7 @@ define @intrinsic_vmaxu_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vmaxu.nxv1i64.nxv1i64( + undef, %0, %1, i32 %2) @@ -862,6 +900,7 @@ entry: declare @llvm.riscv.vmaxu.nxv2i64.nxv2i64( , , + , i32); define @intrinsic_vmaxu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { @@ -872,6 +911,7 @@ define @intrinsic_vmaxu_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vmaxu.nxv2i64.nxv2i64( + undef, %0, %1, i32 %2) @@ -907,6 +947,7 @@ entry: declare @llvm.riscv.vmaxu.nxv4i64.nxv4i64( , , + , i32); define @intrinsic_vmaxu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { @@ -917,6 
+958,7 @@ define @intrinsic_vmaxu_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vmaxu.nxv4i64.nxv4i64( + undef, %0, %1, i32 %2) @@ -952,6 +994,7 @@ entry: declare @llvm.riscv.vmaxu.nxv8i64.nxv8i64( , , + , i32); define @intrinsic_vmaxu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { @@ -962,6 +1005,7 @@ define @intrinsic_vmaxu_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vmaxu.nxv8i64.nxv8i64( + undef, %0, %1, i32 %2) @@ -997,6 +1041,7 @@ entry: declare @llvm.riscv.vmaxu.nxv1i8.i8( , + , i8, i32); @@ -1008,6 +1053,7 @@ define @intrinsic_vmaxu_vx_nxv1i8_nxv1i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1042,6 +1088,7 @@ entry: declare @llvm.riscv.vmaxu.nxv2i8.i8( , + , i8, i32); @@ -1053,6 +1100,7 @@ define @intrinsic_vmaxu_vx_nxv2i8_nxv2i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1087,6 +1135,7 @@ entry: declare @llvm.riscv.vmaxu.nxv4i8.i8( , + , i8, i32); @@ -1098,6 +1147,7 @@ define @intrinsic_vmaxu_vx_nxv4i8_nxv4i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1132,6 +1182,7 @@ entry: declare @llvm.riscv.vmaxu.nxv8i8.i8( , + , i8, i32); @@ -1143,6 +1194,7 @@ define @intrinsic_vmaxu_vx_nxv8i8_nxv8i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1177,6 +1229,7 @@ entry: declare @llvm.riscv.vmaxu.nxv16i8.i8( , + , i8, i32); @@ -1188,6 +1241,7 @@ define @intrinsic_vmaxu_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vmaxu.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1222,6 +1276,7 @@ entry: declare @llvm.riscv.vmaxu.nxv32i8.i8( , + , i8, i32); @@ -1233,6 +1288,7 @@ define @intrinsic_vmaxu_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vmaxu.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1267,6 +1323,7 @@ entry: declare @llvm.riscv.vmaxu.nxv64i8.i8( , + , i8, i32); @@ -1278,6 +1335,7 @@ define @intrinsic_vmaxu_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vmaxu.nxv64i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1312,6 +1370,7 @@ entry: declare @llvm.riscv.vmaxu.nxv1i16.i16( , + , i16, i32); @@ -1323,6 +1382,7 @@ define @intrinsic_vmaxu_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vmaxu.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1357,6 +1417,7 @@ entry: declare @llvm.riscv.vmaxu.nxv2i16.i16( , + , i16, i32); @@ -1368,6 +1429,7 @@ define @intrinsic_vmaxu_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vmaxu.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1402,6 +1464,7 @@ entry: declare @llvm.riscv.vmaxu.nxv4i16.i16( , + , i16, i32); @@ -1413,6 +1476,7 @@ define @intrinsic_vmaxu_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vmaxu.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1447,6 +1511,7 @@ entry: declare @llvm.riscv.vmaxu.nxv8i16.i16( , + , i16, i32); @@ -1458,6 +1523,7 @@ define @intrinsic_vmaxu_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vmaxu.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1492,6 +1558,7 @@ entry: declare @llvm.riscv.vmaxu.nxv16i16.i16( , + , i16, i32); @@ -1503,6 +1570,7 @@ define @intrinsic_vmaxu_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vmaxu.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1537,6 +1605,7 @@ entry: declare @llvm.riscv.vmaxu.nxv32i16.i16( , + , i16, i32); @@ -1548,6 +1617,7 @@ define @intrinsic_vmaxu_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vmaxu.nxv32i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1582,6 +1652,7 @@ entry: declare @llvm.riscv.vmaxu.nxv1i32.i32( , + , i32, i32); @@ -1593,6 +1664,7 @@ define @intrinsic_vmaxu_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vmaxu.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1627,6 +1699,7 @@ entry: 
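; NOTE (illustrative sketch, not one of the checked-in tests): the vector-vector
; (.vv) form follows the same pattern as the .vx sketch above -- the nomask
; declaration now takes the passthru first, then the two vector sources and vl.
; Types are reconstructed from the name mangling (nxv2i32 = <vscale x 2 x i32>);
; @sketch_vmaxu_vv is a hypothetical name (vmaxu-rv32.ll uses i32 for vl).
declare <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,   ; new passthru operand
  <vscale x 2 x i32>,   ; vector source 1
  <vscale x 2 x i32>,   ; vector source 2
  i32);                 ; vl

define <vscale x 2 x i32> @sketch_vmaxu_vv(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.nxv2i32(
    <vscale x 2 x i32> undef,   ; undef passthru
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    i32 %2)
  ret <vscale x 2 x i32> %a
}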
declare @llvm.riscv.vmaxu.nxv2i32.i32( , + , i32, i32); @@ -1638,6 +1711,7 @@ define @intrinsic_vmaxu_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vmaxu.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1672,6 +1746,7 @@ entry: declare @llvm.riscv.vmaxu.nxv4i32.i32( , + , i32, i32); @@ -1683,6 +1758,7 @@ define @intrinsic_vmaxu_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vmaxu.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1717,6 +1793,7 @@ entry: declare @llvm.riscv.vmaxu.nxv8i32.i32( , + , i32, i32); @@ -1728,6 +1805,7 @@ define @intrinsic_vmaxu_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vmaxu.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1762,6 +1840,7 @@ entry: declare @llvm.riscv.vmaxu.nxv16i32.i32( , + , i32, i32); @@ -1773,6 +1852,7 @@ define @intrinsic_vmaxu_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vmaxu.nxv16i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1807,6 +1887,7 @@ entry: declare @llvm.riscv.vmaxu.nxv1i64.i64( , + , i64, i32); @@ -1824,6 +1905,7 @@ define @intrinsic_vmaxu_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vmaxu.nxv1i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1864,6 +1946,7 @@ entry: declare @llvm.riscv.vmaxu.nxv2i64.i64( , + , i64, i32); @@ -1881,6 +1964,7 @@ define @intrinsic_vmaxu_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vmaxu.nxv2i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1921,6 +2005,7 @@ entry: declare @llvm.riscv.vmaxu.nxv4i64.i64( , + , i64, i32); @@ -1938,6 +2023,7 @@ define @intrinsic_vmaxu_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vmaxu.nxv4i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1978,6 +2064,7 @@ entry: declare @llvm.riscv.vmaxu.nxv8i64.i64( , + , i64, i32); @@ -1995,6 +2082,7 @@ define @intrinsic_vmaxu_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vmaxu.nxv8i64.i64( + undef, %0, i64 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll index bc63dae..caaeaf6d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll @@ -4,6 +4,7 @@ declare @llvm.riscv.vmaxu.nxv1i8.nxv1i8( , , + , i64); define @intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { @@ -14,6 +15,7 @@ define @intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vmaxu.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -49,6 +51,7 @@ entry: declare @llvm.riscv.vmaxu.nxv2i8.nxv2i8( , , + , i64); define @intrinsic_vmaxu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { @@ -59,6 +62,7 @@ define @intrinsic_vmaxu_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vmaxu.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -94,6 +98,7 @@ entry: declare @llvm.riscv.vmaxu.nxv4i8.nxv4i8( , , + , i64); define @intrinsic_vmaxu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { @@ -104,6 +109,7 @@ define @intrinsic_vmaxu_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vmaxu.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -139,6 +145,7 @@ entry: declare @llvm.riscv.vmaxu.nxv8i8.nxv8i8( , , + , i64); define @intrinsic_vmaxu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { @@ -149,6 +156,7 @@ define @intrinsic_vmaxu_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vmaxu.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -184,6 +192,7 @@ entry: declare @llvm.riscv.vmaxu.nxv16i8.nxv16i8( , , + , i64); define @intrinsic_vmaxu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { @@ -194,6 +203,7 @@ define @intrinsic_vmaxu_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vmaxu.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -229,6 +239,7 @@ entry: declare @llvm.riscv.vmaxu.nxv32i8.nxv32i8( , , + , i64); define @intrinsic_vmaxu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { @@ -239,6 +250,7 @@ define 
@intrinsic_vmaxu_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vmaxu.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -274,6 +286,7 @@ entry: declare @llvm.riscv.vmaxu.nxv64i8.nxv64i8( , , + , i64); define @intrinsic_vmaxu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { @@ -284,6 +297,7 @@ define @intrinsic_vmaxu_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vmaxu.nxv64i8.nxv64i8( + undef, %0, %1, i64 %2) @@ -320,6 +334,7 @@ entry: declare @llvm.riscv.vmaxu.nxv1i16.nxv1i16( , , + , i64); define @intrinsic_vmaxu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { @@ -330,6 +345,7 @@ define @intrinsic_vmaxu_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vmaxu.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -365,6 +381,7 @@ entry: declare @llvm.riscv.vmaxu.nxv2i16.nxv2i16( , , + , i64); define @intrinsic_vmaxu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { @@ -375,6 +392,7 @@ define @intrinsic_vmaxu_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vmaxu.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -410,6 +428,7 @@ entry: declare @llvm.riscv.vmaxu.nxv4i16.nxv4i16( , , + , i64); define @intrinsic_vmaxu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { @@ -420,6 +439,7 @@ define @intrinsic_vmaxu_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vmaxu.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -455,6 +475,7 @@ entry: declare @llvm.riscv.vmaxu.nxv8i16.nxv8i16( , , + , i64); define @intrinsic_vmaxu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { @@ -465,6 +486,7 @@ define @intrinsic_vmaxu_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vmaxu.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vmaxu.nxv16i16.nxv16i16( , , + , i64); define @intrinsic_vmaxu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { @@ -510,6 +533,7 @@ define @intrinsic_vmaxu_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vmaxu.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vmaxu.nxv32i16.nxv32i16( , , + , i64); define @intrinsic_vmaxu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { @@ -555,6 +580,7 @@ define @intrinsic_vmaxu_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vmaxu.nxv32i16.nxv32i16( + undef, %0, %1, i64 %2) @@ -591,6 +617,7 @@ entry: declare @llvm.riscv.vmaxu.nxv1i32.nxv1i32( , , + , i64); define @intrinsic_vmaxu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { @@ -601,6 +628,7 @@ define @intrinsic_vmaxu_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vmaxu.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -636,6 +664,7 @@ entry: declare @llvm.riscv.vmaxu.nxv2i32.nxv2i32( , , + , i64); define @intrinsic_vmaxu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { @@ -646,6 +675,7 @@ define @intrinsic_vmaxu_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vmaxu.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vmaxu.nxv4i32.nxv4i32( , , + , i64); define @intrinsic_vmaxu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { @@ -691,6 +722,7 @@ define @intrinsic_vmaxu_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vmaxu.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vmaxu.nxv8i32.nxv8i32( , , + , i64); define @intrinsic_vmaxu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { @@ -736,6 +769,7 @@ define @intrinsic_vmaxu_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vmaxu.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vmaxu.nxv16i32.nxv16i32( , , + , i64); define @intrinsic_vmaxu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { @@ -781,6 +816,7 
@@ define @intrinsic_vmaxu_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vmaxu.nxv16i32.nxv16i32( + undef, %0, %1, i64 %2) @@ -817,6 +853,7 @@ entry: declare @llvm.riscv.vmaxu.nxv1i64.nxv1i64( , , + , i64); define @intrinsic_vmaxu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { @@ -827,6 +864,7 @@ define @intrinsic_vmaxu_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vmaxu.nxv1i64.nxv1i64( + undef, %0, %1, i64 %2) @@ -862,6 +900,7 @@ entry: declare @llvm.riscv.vmaxu.nxv2i64.nxv2i64( , , + , i64); define @intrinsic_vmaxu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { @@ -872,6 +911,7 @@ define @intrinsic_vmaxu_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vmaxu.nxv2i64.nxv2i64( + undef, %0, %1, i64 %2) @@ -907,6 +947,7 @@ entry: declare @llvm.riscv.vmaxu.nxv4i64.nxv4i64( , , + , i64); define @intrinsic_vmaxu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { @@ -917,6 +958,7 @@ define @intrinsic_vmaxu_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vmaxu.nxv4i64.nxv4i64( + undef, %0, %1, i64 %2) @@ -952,6 +994,7 @@ entry: declare @llvm.riscv.vmaxu.nxv8i64.nxv8i64( , , + , i64); define @intrinsic_vmaxu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { @@ -962,6 +1005,7 @@ define @intrinsic_vmaxu_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vmaxu.nxv8i64.nxv8i64( + undef, %0, %1, i64 %2) @@ -997,6 +1041,7 @@ entry: declare @llvm.riscv.vmaxu.nxv1i8.i8( , + , i8, i64); @@ -1008,6 +1053,7 @@ define @intrinsic_vmaxu_vx_nxv1i8_nxv1i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1042,6 +1088,7 @@ entry: declare @llvm.riscv.vmaxu.nxv2i8.i8( , + , i8, i64); @@ -1053,6 +1100,7 @@ define @intrinsic_vmaxu_vx_nxv2i8_nxv2i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1087,6 +1135,7 @@ entry: declare @llvm.riscv.vmaxu.nxv4i8.i8( , + , i8, i64); @@ -1098,6 +1147,7 @@ define @intrinsic_vmaxu_vx_nxv4i8_nxv4i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1132,6 +1182,7 @@ entry: declare @llvm.riscv.vmaxu.nxv8i8.i8( , + , i8, i64); @@ -1143,6 +1194,7 @@ define @intrinsic_vmaxu_vx_nxv8i8_nxv8i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1177,6 +1229,7 @@ entry: declare @llvm.riscv.vmaxu.nxv16i8.i8( , + , i8, i64); @@ -1188,6 +1241,7 @@ define @intrinsic_vmaxu_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vmaxu.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1222,6 +1276,7 @@ entry: declare @llvm.riscv.vmaxu.nxv32i8.i8( , + , i8, i64); @@ -1233,6 +1288,7 @@ define @intrinsic_vmaxu_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vmaxu.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1267,6 +1323,7 @@ entry: declare @llvm.riscv.vmaxu.nxv64i8.i8( , + , i8, i64); @@ -1278,6 +1335,7 @@ define @intrinsic_vmaxu_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vmaxu.nxv64i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1312,6 +1370,7 @@ entry: declare @llvm.riscv.vmaxu.nxv1i16.i16( , + , i16, i64); @@ -1323,6 +1382,7 @@ define @intrinsic_vmaxu_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vmaxu.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1357,6 +1417,7 @@ entry: declare @llvm.riscv.vmaxu.nxv2i16.i16( , + , i16, i64); @@ -1368,6 +1429,7 @@ define @intrinsic_vmaxu_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vmaxu.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1402,6 +1464,7 @@ entry: declare @llvm.riscv.vmaxu.nxv4i16.i16( , + , i16, i64); @@ -1413,6 +1476,7 @@ define @intrinsic_vmaxu_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vmaxu.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1447,6 
+1511,7 @@ entry: declare @llvm.riscv.vmaxu.nxv8i16.i16( , + , i16, i64); @@ -1458,6 +1523,7 @@ define @intrinsic_vmaxu_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vmaxu.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1492,6 +1558,7 @@ entry: declare @llvm.riscv.vmaxu.nxv16i16.i16( , + , i16, i64); @@ -1503,6 +1570,7 @@ define @intrinsic_vmaxu_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vmaxu.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1537,6 +1605,7 @@ entry: declare @llvm.riscv.vmaxu.nxv32i16.i16( , + , i16, i64); @@ -1548,6 +1617,7 @@ define @intrinsic_vmaxu_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vmaxu.nxv32i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1582,6 +1652,7 @@ entry: declare @llvm.riscv.vmaxu.nxv1i32.i32( , + , i32, i64); @@ -1593,6 +1664,7 @@ define @intrinsic_vmaxu_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vmaxu.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1627,6 +1699,7 @@ entry: declare @llvm.riscv.vmaxu.nxv2i32.i32( , + , i32, i64); @@ -1638,6 +1711,7 @@ define @intrinsic_vmaxu_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vmaxu.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1672,6 +1746,7 @@ entry: declare @llvm.riscv.vmaxu.nxv4i32.i32( , + , i32, i64); @@ -1683,6 +1758,7 @@ define @intrinsic_vmaxu_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vmaxu.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1717,6 +1793,7 @@ entry: declare @llvm.riscv.vmaxu.nxv8i32.i32( , + , i32, i64); @@ -1728,6 +1805,7 @@ define @intrinsic_vmaxu_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vmaxu.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1762,6 +1840,7 @@ entry: declare @llvm.riscv.vmaxu.nxv16i32.i32( , + , i32, i64); @@ -1773,6 +1852,7 @@ define @intrinsic_vmaxu_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vmaxu.nxv16i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1807,6 +1887,7 @@ entry: declare @llvm.riscv.vmaxu.nxv1i64.i64( , + , i64, i64); @@ -1818,6 +1899,7 @@ define @intrinsic_vmaxu_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vmaxu.nxv1i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1852,6 +1934,7 @@ entry: declare @llvm.riscv.vmaxu.nxv2i64.i64( , + , i64, i64); @@ -1863,6 +1946,7 @@ define @intrinsic_vmaxu_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vmaxu.nxv2i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1897,6 +1981,7 @@ entry: declare @llvm.riscv.vmaxu.nxv4i64.i64( , + , i64, i64); @@ -1908,6 +1993,7 @@ define @intrinsic_vmaxu_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vmaxu.nxv4i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1942,6 +2028,7 @@ entry: declare @llvm.riscv.vmaxu.nxv8i64.i64( , + , i64, i64); @@ -1953,6 +2040,7 @@ define @intrinsic_vmaxu_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vmaxu.nxv8i64.i64( + undef, %0, i64 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll index ebc304b..54c2463 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll @@ -4,6 +4,7 @@ declare @llvm.riscv.vmin.nxv1i8.nxv1i8( , , + , i32); define @intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { @@ -14,6 +15,7 @@ define @intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vmin.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -49,6 +51,7 @@ entry: declare @llvm.riscv.vmin.nxv2i8.nxv2i8( , , + , i32); define @intrinsic_vmin_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { @@ -59,6 +62,7 @@ define @intrinsic_vmin_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vmin.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -94,6 +98,7 @@ entry: declare @llvm.riscv.vmin.nxv4i8.nxv4i8( , , + , i32); define @intrinsic_vmin_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { @@ -104,6 +109,7 @@ define 
@intrinsic_vmin_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vmin.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -139,6 +145,7 @@ entry: declare @llvm.riscv.vmin.nxv8i8.nxv8i8( , , + , i32); define @intrinsic_vmin_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { @@ -149,6 +156,7 @@ define @intrinsic_vmin_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vmin.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -184,6 +192,7 @@ entry: declare @llvm.riscv.vmin.nxv16i8.nxv16i8( , , + , i32); define @intrinsic_vmin_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { @@ -194,6 +203,7 @@ define @intrinsic_vmin_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vmin.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -229,6 +239,7 @@ entry: declare @llvm.riscv.vmin.nxv32i8.nxv32i8( , , + , i32); define @intrinsic_vmin_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { @@ -239,6 +250,7 @@ define @intrinsic_vmin_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vmin.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -274,6 +286,7 @@ entry: declare @llvm.riscv.vmin.nxv64i8.nxv64i8( , , + , i32); define @intrinsic_vmin_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { @@ -284,6 +297,7 @@ define @intrinsic_vmin_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vmin.nxv64i8.nxv64i8( + undef, %0, %1, i32 %2) @@ -320,6 +334,7 @@ entry: declare @llvm.riscv.vmin.nxv1i16.nxv1i16( , , + , i32); define @intrinsic_vmin_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { @@ -330,6 +345,7 @@ define @intrinsic_vmin_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vmin.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -365,6 +381,7 @@ entry: declare @llvm.riscv.vmin.nxv2i16.nxv2i16( , , + , i32); define @intrinsic_vmin_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { @@ -375,6 +392,7 @@ define @intrinsic_vmin_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vmin.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -410,6 +428,7 @@ entry: declare @llvm.riscv.vmin.nxv4i16.nxv4i16( , , + , i32); define @intrinsic_vmin_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { @@ -420,6 +439,7 @@ define @intrinsic_vmin_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vmin.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -455,6 +475,7 @@ entry: declare @llvm.riscv.vmin.nxv8i16.nxv8i16( , , + , i32); define @intrinsic_vmin_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { @@ -465,6 +486,7 @@ define @intrinsic_vmin_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vmin.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vmin.nxv16i16.nxv16i16( , , + , i32); define @intrinsic_vmin_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { @@ -510,6 +533,7 @@ define @intrinsic_vmin_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vmin.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vmin.nxv32i16.nxv32i16( , , + , i32); define @intrinsic_vmin_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { @@ -555,6 +580,7 @@ define @intrinsic_vmin_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vmin.nxv32i16.nxv32i16( + undef, %0, %1, i32 %2) @@ -591,6 +617,7 @@ entry: declare @llvm.riscv.vmin.nxv1i32.nxv1i32( , , + , i32); define @intrinsic_vmin_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { @@ -601,6 +628,7 @@ define @intrinsic_vmin_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vmin.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -636,6 +664,7 @@ entry: declare @llvm.riscv.vmin.nxv2i32.nxv2i32( , , + , i32); define @intrinsic_vmin_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { @@ -646,6 +675,7 @@ define @intrinsic_vmin_vv_nxv2i32_nxv2i32_nxv2i32( 
@llvm.riscv.vmin.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vmin.nxv4i32.nxv4i32( , , + , i32); define @intrinsic_vmin_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { @@ -691,6 +722,7 @@ define @intrinsic_vmin_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vmin.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vmin.nxv8i32.nxv8i32( , , + , i32); define @intrinsic_vmin_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { @@ -736,6 +769,7 @@ define @intrinsic_vmin_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vmin.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vmin.nxv16i32.nxv16i32( , , + , i32); define @intrinsic_vmin_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { @@ -781,6 +816,7 @@ define @intrinsic_vmin_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vmin.nxv16i32.nxv16i32( + undef, %0, %1, i32 %2) @@ -817,6 +853,7 @@ entry: declare @llvm.riscv.vmin.nxv1i64.nxv1i64( , , + , i32); define @intrinsic_vmin_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { @@ -827,6 +864,7 @@ define @intrinsic_vmin_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vmin.nxv1i64.nxv1i64( + undef, %0, %1, i32 %2) @@ -862,6 +900,7 @@ entry: declare @llvm.riscv.vmin.nxv2i64.nxv2i64( , , + , i32); define @intrinsic_vmin_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { @@ -872,6 +911,7 @@ define @intrinsic_vmin_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vmin.nxv2i64.nxv2i64( + undef, %0, %1, i32 %2) @@ -907,6 +947,7 @@ entry: declare @llvm.riscv.vmin.nxv4i64.nxv4i64( , , + , i32); define @intrinsic_vmin_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { @@ -917,6 +958,7 @@ define @intrinsic_vmin_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vmin.nxv4i64.nxv4i64( + undef, %0, %1, i32 %2) @@ -952,6 +994,7 @@ entry: declare @llvm.riscv.vmin.nxv8i64.nxv8i64( , , + , i32); define @intrinsic_vmin_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { @@ -962,6 +1005,7 @@ define @intrinsic_vmin_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vmin.nxv8i64.nxv8i64( + undef, %0, %1, i32 %2) @@ -997,6 +1041,7 @@ entry: declare @llvm.riscv.vmin.nxv1i8.i8( , + , i8, i32); @@ -1008,6 +1053,7 @@ define @intrinsic_vmin_vx_nxv1i8_nxv1i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1042,6 +1088,7 @@ entry: declare @llvm.riscv.vmin.nxv2i8.i8( , + , i8, i32); @@ -1053,6 +1100,7 @@ define @intrinsic_vmin_vx_nxv2i8_nxv2i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1087,6 +1135,7 @@ entry: declare @llvm.riscv.vmin.nxv4i8.i8( , + , i8, i32); @@ -1098,6 +1147,7 @@ define @intrinsic_vmin_vx_nxv4i8_nxv4i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1132,6 +1182,7 @@ entry: declare @llvm.riscv.vmin.nxv8i8.i8( , + , i8, i32); @@ -1143,6 +1194,7 @@ define @intrinsic_vmin_vx_nxv8i8_nxv8i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1177,6 +1229,7 @@ entry: declare @llvm.riscv.vmin.nxv16i8.i8( , + , i8, i32); @@ -1188,6 +1241,7 @@ define @intrinsic_vmin_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vmin.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1222,6 +1276,7 @@ entry: declare @llvm.riscv.vmin.nxv32i8.i8( , + , i8, i32); @@ -1233,6 +1288,7 @@ define @intrinsic_vmin_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vmin.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1267,6 +1323,7 @@ entry: declare @llvm.riscv.vmin.nxv64i8.i8( , + , 
i8, i32); @@ -1278,6 +1335,7 @@ define @intrinsic_vmin_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vmin.nxv64i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1312,6 +1370,7 @@ entry: declare @llvm.riscv.vmin.nxv1i16.i16( , + , i16, i32); @@ -1323,6 +1382,7 @@ define @intrinsic_vmin_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vmin.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1357,6 +1417,7 @@ entry: declare @llvm.riscv.vmin.nxv2i16.i16( , + , i16, i32); @@ -1368,6 +1429,7 @@ define @intrinsic_vmin_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vmin.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1402,6 +1464,7 @@ entry: declare @llvm.riscv.vmin.nxv4i16.i16( , + , i16, i32); @@ -1413,6 +1476,7 @@ define @intrinsic_vmin_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vmin.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1447,6 +1511,7 @@ entry: declare @llvm.riscv.vmin.nxv8i16.i16( , + , i16, i32); @@ -1458,6 +1523,7 @@ define @intrinsic_vmin_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vmin.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1492,6 +1558,7 @@ entry: declare @llvm.riscv.vmin.nxv16i16.i16( , + , i16, i32); @@ -1503,6 +1570,7 @@ define @intrinsic_vmin_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vmin.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1537,6 +1605,7 @@ entry: declare @llvm.riscv.vmin.nxv32i16.i16( , + , i16, i32); @@ -1548,6 +1617,7 @@ define @intrinsic_vmin_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vmin.nxv32i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1582,6 +1652,7 @@ entry: declare @llvm.riscv.vmin.nxv1i32.i32( , + , i32, i32); @@ -1593,6 +1664,7 @@ define @intrinsic_vmin_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vmin.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1627,6 +1699,7 @@ entry: declare @llvm.riscv.vmin.nxv2i32.i32( , + , i32, i32); @@ -1638,6 +1711,7 @@ define @intrinsic_vmin_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vmin.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1672,6 +1746,7 @@ entry: declare @llvm.riscv.vmin.nxv4i32.i32( , + , i32, i32); @@ -1683,6 +1758,7 @@ define @intrinsic_vmin_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vmin.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1717,6 +1793,7 @@ entry: declare @llvm.riscv.vmin.nxv8i32.i32( , + , i32, i32); @@ -1728,6 +1805,7 @@ define @intrinsic_vmin_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vmin.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1762,6 +1840,7 @@ entry: declare @llvm.riscv.vmin.nxv16i32.i32( , + , i32, i32); @@ -1773,6 +1852,7 @@ define @intrinsic_vmin_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vmin.nxv16i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1807,6 +1887,7 @@ entry: declare @llvm.riscv.vmin.nxv1i64.i64( , + , i64, i32); @@ -1824,6 +1905,7 @@ define @intrinsic_vmin_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vmin.nxv1i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1864,6 +1946,7 @@ entry: declare @llvm.riscv.vmin.nxv2i64.i64( , + , i64, i32); @@ -1881,6 +1964,7 @@ define @intrinsic_vmin_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vmin.nxv2i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1921,6 +2005,7 @@ entry: declare @llvm.riscv.vmin.nxv4i64.i64( , + , i64, i32); @@ -1938,6 +2023,7 @@ define @intrinsic_vmin_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vmin.nxv4i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1978,6 +2064,7 @@ entry: declare @llvm.riscv.vmin.nxv8i64.i64( , + , i64, i32); @@ -1995,6 +2082,7 @@ define @intrinsic_vmin_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vmin.nxv8i64.i64( + undef, %0, i64 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll index febe874..4cc67d6 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll @@ 
-4,6 +4,7 @@ declare @llvm.riscv.vmin.nxv1i8.nxv1i8( , , + , i64); define @intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { @@ -14,6 +15,7 @@ define @intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vmin.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -49,6 +51,7 @@ entry: declare @llvm.riscv.vmin.nxv2i8.nxv2i8( , , + , i64); define @intrinsic_vmin_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { @@ -59,6 +62,7 @@ define @intrinsic_vmin_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vmin.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -94,6 +98,7 @@ entry: declare @llvm.riscv.vmin.nxv4i8.nxv4i8( , , + , i64); define @intrinsic_vmin_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { @@ -104,6 +109,7 @@ define @intrinsic_vmin_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vmin.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -139,6 +145,7 @@ entry: declare @llvm.riscv.vmin.nxv8i8.nxv8i8( , , + , i64); define @intrinsic_vmin_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { @@ -149,6 +156,7 @@ define @intrinsic_vmin_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vmin.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -184,6 +192,7 @@ entry: declare @llvm.riscv.vmin.nxv16i8.nxv16i8( , , + , i64); define @intrinsic_vmin_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { @@ -194,6 +203,7 @@ define @intrinsic_vmin_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vmin.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -229,6 +239,7 @@ entry: declare @llvm.riscv.vmin.nxv32i8.nxv32i8( , , + , i64); define @intrinsic_vmin_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { @@ -239,6 +250,7 @@ define @intrinsic_vmin_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vmin.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -274,6 +286,7 @@ entry: declare @llvm.riscv.vmin.nxv64i8.nxv64i8( , , + , i64); define @intrinsic_vmin_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { @@ -284,6 +297,7 @@ define @intrinsic_vmin_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vmin.nxv64i8.nxv64i8( + undef, %0, %1, i64 %2) @@ -320,6 +334,7 @@ entry: declare @llvm.riscv.vmin.nxv1i16.nxv1i16( , , + , i64); define @intrinsic_vmin_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { @@ -330,6 +345,7 @@ define @intrinsic_vmin_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vmin.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -365,6 +381,7 @@ entry: declare @llvm.riscv.vmin.nxv2i16.nxv2i16( , , + , i64); define @intrinsic_vmin_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { @@ -375,6 +392,7 @@ define @intrinsic_vmin_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vmin.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -410,6 +428,7 @@ entry: declare @llvm.riscv.vmin.nxv4i16.nxv4i16( , , + , i64); define @intrinsic_vmin_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { @@ -420,6 +439,7 @@ define @intrinsic_vmin_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vmin.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -455,6 +475,7 @@ entry: declare @llvm.riscv.vmin.nxv8i16.nxv8i16( , , + , i64); define @intrinsic_vmin_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { @@ -465,6 +486,7 @@ define @intrinsic_vmin_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vmin.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vmin.nxv16i16.nxv16i16( , , + , i64); define @intrinsic_vmin_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { @@ -510,6 +533,7 @@ define @intrinsic_vmin_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vmin.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vmin.nxv32i16.nxv32i16( , , + , i64); define 
@intrinsic_vmin_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { @@ -555,6 +580,7 @@ define @intrinsic_vmin_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vmin.nxv32i16.nxv32i16( + undef, %0, %1, i64 %2) @@ -591,6 +617,7 @@ entry: declare @llvm.riscv.vmin.nxv1i32.nxv1i32( , , + , i64); define @intrinsic_vmin_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { @@ -601,6 +628,7 @@ define @intrinsic_vmin_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vmin.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -636,6 +664,7 @@ entry: declare @llvm.riscv.vmin.nxv2i32.nxv2i32( , , + , i64); define @intrinsic_vmin_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { @@ -646,6 +675,7 @@ define @intrinsic_vmin_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vmin.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vmin.nxv4i32.nxv4i32( , , + , i64); define @intrinsic_vmin_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { @@ -691,6 +722,7 @@ define @intrinsic_vmin_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vmin.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vmin.nxv8i32.nxv8i32( , , + , i64); define @intrinsic_vmin_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { @@ -736,6 +769,7 @@ define @intrinsic_vmin_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vmin.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vmin.nxv16i32.nxv16i32( , , + , i64); define @intrinsic_vmin_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { @@ -781,6 +816,7 @@ define @intrinsic_vmin_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vmin.nxv16i32.nxv16i32( + undef, %0, %1, i64 %2) @@ -817,6 +853,7 @@ entry: declare @llvm.riscv.vmin.nxv1i64.nxv1i64( , , + , i64); define @intrinsic_vmin_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { @@ -827,6 +864,7 @@ define @intrinsic_vmin_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vmin.nxv1i64.nxv1i64( + undef, %0, %1, i64 %2) @@ -862,6 +900,7 @@ entry: declare @llvm.riscv.vmin.nxv2i64.nxv2i64( , , + , i64); define @intrinsic_vmin_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { @@ -872,6 +911,7 @@ define @intrinsic_vmin_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vmin.nxv2i64.nxv2i64( + undef, %0, %1, i64 %2) @@ -907,6 +947,7 @@ entry: declare @llvm.riscv.vmin.nxv4i64.nxv4i64( , , + , i64); define @intrinsic_vmin_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { @@ -917,6 +958,7 @@ define @intrinsic_vmin_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vmin.nxv4i64.nxv4i64( + undef, %0, %1, i64 %2) @@ -952,6 +994,7 @@ entry: declare @llvm.riscv.vmin.nxv8i64.nxv8i64( , , + , i64); define @intrinsic_vmin_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { @@ -962,6 +1005,7 @@ define @intrinsic_vmin_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vmin.nxv8i64.nxv8i64( + undef, %0, %1, i64 %2) @@ -997,6 +1041,7 @@ entry: declare @llvm.riscv.vmin.nxv1i8.i8( , + , i8, i64); @@ -1008,6 +1053,7 @@ define @intrinsic_vmin_vx_nxv1i8_nxv1i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1042,6 +1088,7 @@ entry: declare @llvm.riscv.vmin.nxv2i8.i8( , + , i8, i64); @@ -1053,6 +1100,7 @@ define @intrinsic_vmin_vx_nxv2i8_nxv2i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1087,6 +1135,7 @@ entry: declare @llvm.riscv.vmin.nxv4i8.i8( , + , i8, i64); @@ -1098,6 +1147,7 @@ define @intrinsic_vmin_vx_nxv4i8_nxv4i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv4i8.i8( + undef, %0, i8 %1, i64 
%2) @@ -1132,6 +1182,7 @@ entry: declare @llvm.riscv.vmin.nxv8i8.i8( , + , i8, i64); @@ -1143,6 +1194,7 @@ define @intrinsic_vmin_vx_nxv8i8_nxv8i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1177,6 +1229,7 @@ entry: declare @llvm.riscv.vmin.nxv16i8.i8( , + , i8, i64); @@ -1188,6 +1241,7 @@ define @intrinsic_vmin_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vmin.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1222,6 +1276,7 @@ entry: declare @llvm.riscv.vmin.nxv32i8.i8( , + , i8, i64); @@ -1233,6 +1288,7 @@ define @intrinsic_vmin_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vmin.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1267,6 +1323,7 @@ entry: declare @llvm.riscv.vmin.nxv64i8.i8( , + , i8, i64); @@ -1278,6 +1335,7 @@ define @intrinsic_vmin_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vmin.nxv64i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1312,6 +1370,7 @@ entry: declare @llvm.riscv.vmin.nxv1i16.i16( , + , i16, i64); @@ -1323,6 +1382,7 @@ define @intrinsic_vmin_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vmin.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1357,6 +1417,7 @@ entry: declare @llvm.riscv.vmin.nxv2i16.i16( , + , i16, i64); @@ -1368,6 +1429,7 @@ define @intrinsic_vmin_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vmin.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1402,6 +1464,7 @@ entry: declare @llvm.riscv.vmin.nxv4i16.i16( , + , i16, i64); @@ -1413,6 +1476,7 @@ define @intrinsic_vmin_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vmin.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1447,6 +1511,7 @@ entry: declare @llvm.riscv.vmin.nxv8i16.i16( , + , i16, i64); @@ -1458,6 +1523,7 @@ define @intrinsic_vmin_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vmin.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1492,6 +1558,7 @@ entry: declare @llvm.riscv.vmin.nxv16i16.i16( , + , i16, i64); @@ -1503,6 +1570,7 @@ define @intrinsic_vmin_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vmin.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1537,6 +1605,7 @@ entry: declare @llvm.riscv.vmin.nxv32i16.i16( , + , i16, i64); @@ -1548,6 +1617,7 @@ define @intrinsic_vmin_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vmin.nxv32i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1582,6 +1652,7 @@ entry: declare @llvm.riscv.vmin.nxv1i32.i32( , + , i32, i64); @@ -1593,6 +1664,7 @@ define @intrinsic_vmin_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vmin.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1627,6 +1699,7 @@ entry: declare @llvm.riscv.vmin.nxv2i32.i32( , + , i32, i64); @@ -1638,6 +1711,7 @@ define @intrinsic_vmin_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vmin.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1672,6 +1746,7 @@ entry: declare @llvm.riscv.vmin.nxv4i32.i32( , + , i32, i64); @@ -1683,6 +1758,7 @@ define @intrinsic_vmin_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vmin.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1717,6 +1793,7 @@ entry: declare @llvm.riscv.vmin.nxv8i32.i32( , + , i32, i64); @@ -1728,6 +1805,7 @@ define @intrinsic_vmin_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vmin.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1762,6 +1840,7 @@ entry: declare @llvm.riscv.vmin.nxv16i32.i32( , + , i32, i64); @@ -1773,6 +1852,7 @@ define @intrinsic_vmin_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vmin.nxv16i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1807,6 +1887,7 @@ entry: declare @llvm.riscv.vmin.nxv1i64.i64( , + , i64, i64); @@ -1818,6 +1899,7 @@ define @intrinsic_vmin_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vmin.nxv1i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1852,6 +1934,7 @@ entry: declare @llvm.riscv.vmin.nxv2i64.i64( , + , i64, i64); @@ -1863,6 +1946,7 @@ define 
@intrinsic_vmin_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vmin.nxv2i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1897,6 +1981,7 @@ entry: declare @llvm.riscv.vmin.nxv4i64.i64( , + , i64, i64); @@ -1908,6 +1993,7 @@ define @intrinsic_vmin_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vmin.nxv4i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1942,6 +2028,7 @@ entry: declare @llvm.riscv.vmin.nxv8i64.i64( , + , i64, i64); @@ -1953,6 +2040,7 @@ define @intrinsic_vmin_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vmin.nxv8i64.i64( + undef, %0, i64 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll index ec3cc01..5f024c9 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll @@ -4,6 +4,7 @@ declare @llvm.riscv.vminu.nxv1i8.nxv1i8( , , + , i32); define @intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { @@ -14,6 +15,7 @@ define @intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vminu.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -49,6 +51,7 @@ entry: declare @llvm.riscv.vminu.nxv2i8.nxv2i8( , , + , i32); define @intrinsic_vminu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { @@ -59,6 +62,7 @@ define @intrinsic_vminu_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vminu.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -94,6 +98,7 @@ entry: declare @llvm.riscv.vminu.nxv4i8.nxv4i8( , , + , i32); define @intrinsic_vminu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { @@ -104,6 +109,7 @@ define @intrinsic_vminu_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vminu.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -139,6 +145,7 @@ entry: declare @llvm.riscv.vminu.nxv8i8.nxv8i8( , , + , i32); define @intrinsic_vminu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { @@ -149,6 +156,7 @@ define @intrinsic_vminu_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vminu.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -184,6 +192,7 @@ entry: declare @llvm.riscv.vminu.nxv16i8.nxv16i8( , , + , i32); define @intrinsic_vminu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { @@ -194,6 +203,7 @@ define @intrinsic_vminu_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vminu.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -229,6 +239,7 @@ entry: declare @llvm.riscv.vminu.nxv32i8.nxv32i8( , , + , i32); define @intrinsic_vminu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { @@ -239,6 +250,7 @@ define @intrinsic_vminu_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vminu.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -274,6 +286,7 @@ entry: declare @llvm.riscv.vminu.nxv64i8.nxv64i8( , , + , i32); define @intrinsic_vminu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { @@ -284,6 +297,7 @@ define @intrinsic_vminu_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vminu.nxv64i8.nxv64i8( + undef, %0, %1, i32 %2) @@ -320,6 +334,7 @@ entry: declare @llvm.riscv.vminu.nxv1i16.nxv1i16( , , + , i32); define @intrinsic_vminu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { @@ -330,6 +345,7 @@ define @intrinsic_vminu_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vminu.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -365,6 +381,7 @@ entry: declare @llvm.riscv.vminu.nxv2i16.nxv2i16( , , + , i32); define @intrinsic_vminu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { @@ -375,6 +392,7 @@ define @intrinsic_vminu_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vminu.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -410,6 +428,7 @@ entry: declare @llvm.riscv.vminu.nxv4i16.nxv4i16( , , + , i32); define @intrinsic_vminu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { @@ -420,6 +439,7 @@ define 
@intrinsic_vminu_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vminu.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -455,6 +475,7 @@ entry: declare @llvm.riscv.vminu.nxv8i16.nxv8i16( , , + , i32); define @intrinsic_vminu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { @@ -465,6 +486,7 @@ define @intrinsic_vminu_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vminu.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vminu.nxv16i16.nxv16i16( , , + , i32); define @intrinsic_vminu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { @@ -510,6 +533,7 @@ define @intrinsic_vminu_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vminu.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vminu.nxv32i16.nxv32i16( , , + , i32); define @intrinsic_vminu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { @@ -555,6 +580,7 @@ define @intrinsic_vminu_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vminu.nxv32i16.nxv32i16( + undef, %0, %1, i32 %2) @@ -591,6 +617,7 @@ entry: declare @llvm.riscv.vminu.nxv1i32.nxv1i32( , , + , i32); define @intrinsic_vminu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { @@ -601,6 +628,7 @@ define @intrinsic_vminu_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vminu.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -636,6 +664,7 @@ entry: declare @llvm.riscv.vminu.nxv2i32.nxv2i32( , , + , i32); define @intrinsic_vminu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { @@ -646,6 +675,7 @@ define @intrinsic_vminu_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vminu.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vminu.nxv4i32.nxv4i32( , , + , i32); define @intrinsic_vminu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { @@ -691,6 +722,7 @@ define @intrinsic_vminu_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vminu.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vminu.nxv8i32.nxv8i32( , , + , i32); define @intrinsic_vminu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { @@ -736,6 +769,7 @@ define @intrinsic_vminu_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vminu.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vminu.nxv16i32.nxv16i32( , , + , i32); define @intrinsic_vminu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { @@ -781,6 +816,7 @@ define @intrinsic_vminu_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vminu.nxv16i32.nxv16i32( + undef, %0, %1, i32 %2) @@ -817,6 +853,7 @@ entry: declare @llvm.riscv.vminu.nxv1i64.nxv1i64( , , + , i32); define @intrinsic_vminu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { @@ -827,6 +864,7 @@ define @intrinsic_vminu_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vminu.nxv1i64.nxv1i64( + undef, %0, %1, i32 %2) @@ -862,6 +900,7 @@ entry: declare @llvm.riscv.vminu.nxv2i64.nxv2i64( , , + , i32); define @intrinsic_vminu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { @@ -872,6 +911,7 @@ define @intrinsic_vminu_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vminu.nxv2i64.nxv2i64( + undef, %0, %1, i32 %2) @@ -907,6 +947,7 @@ entry: declare @llvm.riscv.vminu.nxv4i64.nxv4i64( , , + , i32); define @intrinsic_vminu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { @@ -917,6 +958,7 @@ define @intrinsic_vminu_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vminu.nxv4i64.nxv4i64( + undef, %0, %1, i32 %2) @@ -952,6 +994,7 @@ entry: declare @llvm.riscv.vminu.nxv8i64.nxv8i64( , , + , i32); define @intrinsic_vminu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { @@ -962,6 
+1005,7 @@ define @intrinsic_vminu_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vminu.nxv8i64.nxv8i64( + undef, %0, %1, i32 %2) @@ -997,6 +1041,7 @@ entry: declare @llvm.riscv.vminu.nxv1i8.i8( , + , i8, i32); @@ -1008,6 +1053,7 @@ define @intrinsic_vminu_vx_nxv1i8_nxv1i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1042,6 +1088,7 @@ entry: declare @llvm.riscv.vminu.nxv2i8.i8( , + , i8, i32); @@ -1053,6 +1100,7 @@ define @intrinsic_vminu_vx_nxv2i8_nxv2i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1087,6 +1135,7 @@ entry: declare @llvm.riscv.vminu.nxv4i8.i8( , + , i8, i32); @@ -1098,6 +1147,7 @@ define @intrinsic_vminu_vx_nxv4i8_nxv4i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1132,6 +1182,7 @@ entry: declare @llvm.riscv.vminu.nxv8i8.i8( , + , i8, i32); @@ -1143,6 +1194,7 @@ define @intrinsic_vminu_vx_nxv8i8_nxv8i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1177,6 +1229,7 @@ entry: declare @llvm.riscv.vminu.nxv16i8.i8( , + , i8, i32); @@ -1188,6 +1241,7 @@ define @intrinsic_vminu_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vminu.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1222,6 +1276,7 @@ entry: declare @llvm.riscv.vminu.nxv32i8.i8( , + , i8, i32); @@ -1233,6 +1288,7 @@ define @intrinsic_vminu_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vminu.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1267,6 +1323,7 @@ entry: declare @llvm.riscv.vminu.nxv64i8.i8( , + , i8, i32); @@ -1278,6 +1335,7 @@ define @intrinsic_vminu_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vminu.nxv64i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1312,6 +1370,7 @@ entry: declare @llvm.riscv.vminu.nxv1i16.i16( , + , i16, i32); @@ -1323,6 +1382,7 @@ define @intrinsic_vminu_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vminu.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1357,6 +1417,7 @@ entry: declare @llvm.riscv.vminu.nxv2i16.i16( , + , i16, i32); @@ -1368,6 +1429,7 @@ define @intrinsic_vminu_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vminu.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1402,6 +1464,7 @@ entry: declare @llvm.riscv.vminu.nxv4i16.i16( , + , i16, i32); @@ -1413,6 +1476,7 @@ define @intrinsic_vminu_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vminu.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1447,6 +1511,7 @@ entry: declare @llvm.riscv.vminu.nxv8i16.i16( , + , i16, i32); @@ -1458,6 +1523,7 @@ define @intrinsic_vminu_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vminu.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1492,6 +1558,7 @@ entry: declare @llvm.riscv.vminu.nxv16i16.i16( , + , i16, i32); @@ -1503,6 +1570,7 @@ define @intrinsic_vminu_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vminu.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1537,6 +1605,7 @@ entry: declare @llvm.riscv.vminu.nxv32i16.i16( , + , i16, i32); @@ -1548,6 +1617,7 @@ define @intrinsic_vminu_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vminu.nxv32i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1582,6 +1652,7 @@ entry: declare @llvm.riscv.vminu.nxv1i32.i32( , + , i32, i32); @@ -1593,6 +1664,7 @@ define @intrinsic_vminu_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vminu.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1627,6 +1699,7 @@ entry: declare @llvm.riscv.vminu.nxv2i32.i32( , + , i32, i32); @@ -1638,6 +1711,7 @@ define @intrinsic_vminu_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vminu.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1672,6 +1746,7 @@ entry: declare @llvm.riscv.vminu.nxv4i32.i32( , + , i32, i32); @@ -1683,6 +1758,7 @@ 
define @intrinsic_vminu_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vminu.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1717,6 +1793,7 @@ entry: declare @llvm.riscv.vminu.nxv8i32.i32( , + , i32, i32); @@ -1728,6 +1805,7 @@ define @intrinsic_vminu_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vminu.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1762,6 +1840,7 @@ entry: declare @llvm.riscv.vminu.nxv16i32.i32( , + , i32, i32); @@ -1773,6 +1852,7 @@ define @intrinsic_vminu_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vminu.nxv16i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1807,6 +1887,7 @@ entry: declare @llvm.riscv.vminu.nxv1i64.i64( , + , i64, i32); @@ -1824,6 +1905,7 @@ define @intrinsic_vminu_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vminu.nxv1i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1864,6 +1946,7 @@ entry: declare @llvm.riscv.vminu.nxv2i64.i64( , + , i64, i32); @@ -1881,6 +1964,7 @@ define @intrinsic_vminu_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vminu.nxv2i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1921,6 +2005,7 @@ entry: declare @llvm.riscv.vminu.nxv4i64.i64( , + , i64, i32); @@ -1938,6 +2023,7 @@ define @intrinsic_vminu_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vminu.nxv4i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1978,6 +2064,7 @@ entry: declare @llvm.riscv.vminu.nxv8i64.i64( , + , i64, i32); @@ -1995,6 +2082,7 @@ define @intrinsic_vminu_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vminu.nxv8i64.i64( + undef, %0, i64 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll index f3b9100..93881f6 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll @@ -4,6 +4,7 @@ declare @llvm.riscv.vminu.nxv1i8.nxv1i8( , , + , i64); define @intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { @@ -14,6 +15,7 @@ define @intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vminu.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -49,6 +51,7 @@ entry: declare @llvm.riscv.vminu.nxv2i8.nxv2i8( , , + , i64); define @intrinsic_vminu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { @@ -59,6 +62,7 @@ define @intrinsic_vminu_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vminu.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -94,6 +98,7 @@ entry: declare @llvm.riscv.vminu.nxv4i8.nxv4i8( , , + , i64); define @intrinsic_vminu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { @@ -104,6 +109,7 @@ define @intrinsic_vminu_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vminu.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -139,6 +145,7 @@ entry: declare @llvm.riscv.vminu.nxv8i8.nxv8i8( , , + , i64); define @intrinsic_vminu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { @@ -149,6 +156,7 @@ define @intrinsic_vminu_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vminu.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -184,6 +192,7 @@ entry: declare @llvm.riscv.vminu.nxv16i8.nxv16i8( , , + , i64); define @intrinsic_vminu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { @@ -194,6 +203,7 @@ define @intrinsic_vminu_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vminu.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -229,6 +239,7 @@ entry: declare @llvm.riscv.vminu.nxv32i8.nxv32i8( , , + , i64); define @intrinsic_vminu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { @@ -239,6 +250,7 @@ define @intrinsic_vminu_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vminu.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -274,6 +286,7 @@ entry: declare @llvm.riscv.vminu.nxv64i8.nxv64i8( , , + , i64); define @intrinsic_vminu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { @@ -284,6 +297,7 @@ define 
@intrinsic_vminu_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vminu.nxv64i8.nxv64i8( + undef, %0, %1, i64 %2) @@ -320,6 +334,7 @@ entry: declare @llvm.riscv.vminu.nxv1i16.nxv1i16( , , + , i64); define @intrinsic_vminu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { @@ -330,6 +345,7 @@ define @intrinsic_vminu_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vminu.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -365,6 +381,7 @@ entry: declare @llvm.riscv.vminu.nxv2i16.nxv2i16( , , + , i64); define @intrinsic_vminu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { @@ -375,6 +392,7 @@ define @intrinsic_vminu_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vminu.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -410,6 +428,7 @@ entry: declare @llvm.riscv.vminu.nxv4i16.nxv4i16( , , + , i64); define @intrinsic_vminu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { @@ -420,6 +439,7 @@ define @intrinsic_vminu_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vminu.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -455,6 +475,7 @@ entry: declare @llvm.riscv.vminu.nxv8i16.nxv8i16( , , + , i64); define @intrinsic_vminu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { @@ -465,6 +486,7 @@ define @intrinsic_vminu_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vminu.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vminu.nxv16i16.nxv16i16( , , + , i64); define @intrinsic_vminu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { @@ -510,6 +533,7 @@ define @intrinsic_vminu_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vminu.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vminu.nxv32i16.nxv32i16( , , + , i64); define @intrinsic_vminu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { @@ -555,6 +580,7 @@ define @intrinsic_vminu_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vminu.nxv32i16.nxv32i16( + undef, %0, %1, i64 %2) @@ -591,6 +617,7 @@ entry: declare @llvm.riscv.vminu.nxv1i32.nxv1i32( , , + , i64); define @intrinsic_vminu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { @@ -601,6 +628,7 @@ define @intrinsic_vminu_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vminu.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -636,6 +664,7 @@ entry: declare @llvm.riscv.vminu.nxv2i32.nxv2i32( , , + , i64); define @intrinsic_vminu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { @@ -646,6 +675,7 @@ define @intrinsic_vminu_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vminu.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vminu.nxv4i32.nxv4i32( , , + , i64); define @intrinsic_vminu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { @@ -691,6 +722,7 @@ define @intrinsic_vminu_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vminu.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vminu.nxv8i32.nxv8i32( , , + , i64); define @intrinsic_vminu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { @@ -736,6 +769,7 @@ define @intrinsic_vminu_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vminu.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vminu.nxv16i32.nxv16i32( , , + , i64); define @intrinsic_vminu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { @@ -781,6 +816,7 @@ define @intrinsic_vminu_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vminu.nxv16i32.nxv16i32( + undef, %0, %1, i64 %2) @@ -817,6 +853,7 @@ entry: declare @llvm.riscv.vminu.nxv1i64.nxv1i64( , , + , i64); define @intrinsic_vminu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { @@ -827,6 
+864,7 @@ define @intrinsic_vminu_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vminu.nxv1i64.nxv1i64( + undef, %0, %1, i64 %2) @@ -862,6 +900,7 @@ entry: declare @llvm.riscv.vminu.nxv2i64.nxv2i64( , , + , i64); define @intrinsic_vminu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { @@ -872,6 +911,7 @@ define @intrinsic_vminu_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vminu.nxv2i64.nxv2i64( + undef, %0, %1, i64 %2) @@ -907,6 +947,7 @@ entry: declare @llvm.riscv.vminu.nxv4i64.nxv4i64( , , + , i64); define @intrinsic_vminu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { @@ -917,6 +958,7 @@ define @intrinsic_vminu_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vminu.nxv4i64.nxv4i64( + undef, %0, %1, i64 %2) @@ -952,6 +994,7 @@ entry: declare @llvm.riscv.vminu.nxv8i64.nxv8i64( , , + , i64); define @intrinsic_vminu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { @@ -962,6 +1005,7 @@ define @intrinsic_vminu_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vminu.nxv8i64.nxv8i64( + undef, %0, %1, i64 %2) @@ -997,6 +1041,7 @@ entry: declare @llvm.riscv.vminu.nxv1i8.i8( , + , i8, i64); @@ -1008,6 +1053,7 @@ define @intrinsic_vminu_vx_nxv1i8_nxv1i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1042,6 +1088,7 @@ entry: declare @llvm.riscv.vminu.nxv2i8.i8( , + , i8, i64); @@ -1053,6 +1100,7 @@ define @intrinsic_vminu_vx_nxv2i8_nxv2i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1087,6 +1135,7 @@ entry: declare @llvm.riscv.vminu.nxv4i8.i8( , + , i8, i64); @@ -1098,6 +1147,7 @@ define @intrinsic_vminu_vx_nxv4i8_nxv4i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1132,6 +1182,7 @@ entry: declare @llvm.riscv.vminu.nxv8i8.i8( , + , i8, i64); @@ -1143,6 +1194,7 @@ define @intrinsic_vminu_vx_nxv8i8_nxv8i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1177,6 +1229,7 @@ entry: declare @llvm.riscv.vminu.nxv16i8.i8( , + , i8, i64); @@ -1188,6 +1241,7 @@ define @intrinsic_vminu_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vminu.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1222,6 +1276,7 @@ entry: declare @llvm.riscv.vminu.nxv32i8.i8( , + , i8, i64); @@ -1233,6 +1288,7 @@ define @intrinsic_vminu_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vminu.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1267,6 +1323,7 @@ entry: declare @llvm.riscv.vminu.nxv64i8.i8( , + , i8, i64); @@ -1278,6 +1335,7 @@ define @intrinsic_vminu_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vminu.nxv64i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1312,6 +1370,7 @@ entry: declare @llvm.riscv.vminu.nxv1i16.i16( , + , i16, i64); @@ -1323,6 +1382,7 @@ define @intrinsic_vminu_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vminu.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1357,6 +1417,7 @@ entry: declare @llvm.riscv.vminu.nxv2i16.i16( , + , i16, i64); @@ -1368,6 +1429,7 @@ define @intrinsic_vminu_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vminu.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1402,6 +1464,7 @@ entry: declare @llvm.riscv.vminu.nxv4i16.i16( , + , i16, i64); @@ -1413,6 +1476,7 @@ define @intrinsic_vminu_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vminu.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1447,6 +1511,7 @@ entry: declare @llvm.riscv.vminu.nxv8i16.i16( , + , i16, i64); @@ -1458,6 +1523,7 @@ define @intrinsic_vminu_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vminu.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1492,6 +1558,7 @@ entry: declare @llvm.riscv.vminu.nxv16i16.i16( , + , i16, i64); @@ 
-1503,6 +1570,7 @@ define @intrinsic_vminu_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vminu.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1537,6 +1605,7 @@ entry: declare @llvm.riscv.vminu.nxv32i16.i16( , + , i16, i64); @@ -1548,6 +1617,7 @@ define @intrinsic_vminu_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vminu.nxv32i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1582,6 +1652,7 @@ entry: declare @llvm.riscv.vminu.nxv1i32.i32( , + , i32, i64); @@ -1593,6 +1664,7 @@ define @intrinsic_vminu_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vminu.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1627,6 +1699,7 @@ entry: declare @llvm.riscv.vminu.nxv2i32.i32( , + , i32, i64); @@ -1638,6 +1711,7 @@ define @intrinsic_vminu_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vminu.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1672,6 +1746,7 @@ entry: declare @llvm.riscv.vminu.nxv4i32.i32( , + , i32, i64); @@ -1683,6 +1758,7 @@ define @intrinsic_vminu_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vminu.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1717,6 +1793,7 @@ entry: declare @llvm.riscv.vminu.nxv8i32.i32( , + , i32, i64); @@ -1728,6 +1805,7 @@ define @intrinsic_vminu_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vminu.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1762,6 +1840,7 @@ entry: declare @llvm.riscv.vminu.nxv16i32.i32( , + , i32, i64); @@ -1773,6 +1852,7 @@ define @intrinsic_vminu_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vminu.nxv16i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1807,6 +1887,7 @@ entry: declare @llvm.riscv.vminu.nxv1i64.i64( , + , i64, i64); @@ -1818,6 +1899,7 @@ define @intrinsic_vminu_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vminu.nxv1i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1852,6 +1934,7 @@ entry: declare @llvm.riscv.vminu.nxv2i64.i64( , + , i64, i64); @@ -1863,6 +1946,7 @@ define @intrinsic_vminu_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vminu.nxv2i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1897,6 +1981,7 @@ entry: declare @llvm.riscv.vminu.nxv4i64.i64( , + , i64, i64); @@ -1908,6 +1993,7 @@ define @intrinsic_vminu_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vminu.nxv4i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1942,6 +2028,7 @@ entry: declare @llvm.riscv.vminu.nxv8i64.i64( , + , i64, i64); @@ -1953,6 +2040,7 @@ define @intrinsic_vminu_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vminu.nxv8i64.i64( + undef, %0, i64 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll index cf0c1da..6727bb5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll @@ -4,6 +4,7 @@ declare @llvm.riscv.vmul.nxv1i8.nxv1i8( , , + , i32); define @intrinsic_vmul_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { @@ -14,6 +15,7 @@ define @intrinsic_vmul_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vmul.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -49,6 +51,7 @@ entry: declare @llvm.riscv.vmul.nxv2i8.nxv2i8( , , + , i32); define @intrinsic_vmul_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { @@ -59,6 +62,7 @@ define @intrinsic_vmul_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vmul.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -94,6 +98,7 @@ entry: declare @llvm.riscv.vmul.nxv4i8.nxv4i8( , , + , i32); define @intrinsic_vmul_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { @@ -104,6 +109,7 @@ define @intrinsic_vmul_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vmul.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -139,6 +145,7 @@ entry: declare @llvm.riscv.vmul.nxv8i8.nxv8i8( , , + , i32); define @intrinsic_vmul_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { @@ -149,6 +156,7 @@ define 
@intrinsic_vmul_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vmul.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -184,6 +192,7 @@ entry: declare @llvm.riscv.vmul.nxv16i8.nxv16i8( , , + , i32); define @intrinsic_vmul_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { @@ -194,6 +203,7 @@ define @intrinsic_vmul_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vmul.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -229,6 +239,7 @@ entry: declare @llvm.riscv.vmul.nxv32i8.nxv32i8( , , + , i32); define @intrinsic_vmul_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { @@ -239,6 +250,7 @@ define @intrinsic_vmul_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vmul.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -274,6 +286,7 @@ entry: declare @llvm.riscv.vmul.nxv64i8.nxv64i8( , , + , i32); define @intrinsic_vmul_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { @@ -284,6 +297,7 @@ define @intrinsic_vmul_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vmul.nxv64i8.nxv64i8( + undef, %0, %1, i32 %2) @@ -320,6 +334,7 @@ entry: declare @llvm.riscv.vmul.nxv1i16.nxv1i16( , , + , i32); define @intrinsic_vmul_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { @@ -330,6 +345,7 @@ define @intrinsic_vmul_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vmul.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -365,6 +381,7 @@ entry: declare @llvm.riscv.vmul.nxv2i16.nxv2i16( , , + , i32); define @intrinsic_vmul_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { @@ -375,6 +392,7 @@ define @intrinsic_vmul_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vmul.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -410,6 +428,7 @@ entry: declare @llvm.riscv.vmul.nxv4i16.nxv4i16( , , + , i32); define @intrinsic_vmul_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { @@ -420,6 +439,7 @@ define @intrinsic_vmul_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vmul.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -455,6 +475,7 @@ entry: declare @llvm.riscv.vmul.nxv8i16.nxv8i16( , , + , i32); define @intrinsic_vmul_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { @@ -465,6 +486,7 @@ define @intrinsic_vmul_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vmul.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vmul.nxv16i16.nxv16i16( , , + , i32); define @intrinsic_vmul_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { @@ -510,6 +533,7 @@ define @intrinsic_vmul_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vmul.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vmul.nxv32i16.nxv32i16( , , + , i32); define @intrinsic_vmul_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { @@ -555,6 +580,7 @@ define @intrinsic_vmul_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vmul.nxv32i16.nxv32i16( + undef, %0, %1, i32 %2) @@ -591,6 +617,7 @@ entry: declare @llvm.riscv.vmul.nxv1i32.nxv1i32( , , + , i32); define @intrinsic_vmul_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { @@ -601,6 +628,7 @@ define @intrinsic_vmul_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vmul.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -636,6 +664,7 @@ entry: declare @llvm.riscv.vmul.nxv2i32.nxv2i32( , , + , i32); define @intrinsic_vmul_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { @@ -646,6 +675,7 @@ define @intrinsic_vmul_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vmul.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vmul.nxv4i32.nxv4i32( , , + , i32); define @intrinsic_vmul_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { @@ -691,6 +722,7 @@ define @intrinsic_vmul_vv_nxv4i32_nxv4i32_nxv4i32( 
@llvm.riscv.vmul.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vmul.nxv8i32.nxv8i32( , , + , i32); define @intrinsic_vmul_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { @@ -736,6 +769,7 @@ define @intrinsic_vmul_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vmul.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vmul.nxv16i32.nxv16i32( , , + , i32); define @intrinsic_vmul_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { @@ -781,6 +816,7 @@ define @intrinsic_vmul_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vmul.nxv16i32.nxv16i32( + undef, %0, %1, i32 %2) @@ -817,6 +853,7 @@ entry: declare @llvm.riscv.vmul.nxv1i64.nxv1i64( , , + , i32); define @intrinsic_vmul_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { @@ -827,6 +864,7 @@ define @intrinsic_vmul_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vmul.nxv1i64.nxv1i64( + undef, %0, %1, i32 %2) @@ -862,6 +900,7 @@ entry: declare @llvm.riscv.vmul.nxv2i64.nxv2i64( , , + , i32); define @intrinsic_vmul_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { @@ -872,6 +911,7 @@ define @intrinsic_vmul_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vmul.nxv2i64.nxv2i64( + undef, %0, %1, i32 %2) @@ -907,6 +947,7 @@ entry: declare @llvm.riscv.vmul.nxv4i64.nxv4i64( , , + , i32); define @intrinsic_vmul_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { @@ -917,6 +958,7 @@ define @intrinsic_vmul_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vmul.nxv4i64.nxv4i64( + undef, %0, %1, i32 %2) @@ -952,6 +994,7 @@ entry: declare @llvm.riscv.vmul.nxv8i64.nxv8i64( , , + , i32); define @intrinsic_vmul_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { @@ -962,6 +1005,7 @@ define @intrinsic_vmul_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vmul.nxv8i64.nxv8i64( + undef, %0, %1, i32 %2) @@ -997,6 +1041,7 @@ entry: declare @llvm.riscv.vmul.nxv1i8.i8( , + , i8, i32); @@ -1008,6 +1053,7 @@ define @intrinsic_vmul_vx_nxv1i8_nxv1i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1042,6 +1088,7 @@ entry: declare @llvm.riscv.vmul.nxv2i8.i8( , + , i8, i32); @@ -1053,6 +1100,7 @@ define @intrinsic_vmul_vx_nxv2i8_nxv2i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1087,6 +1135,7 @@ entry: declare @llvm.riscv.vmul.nxv4i8.i8( , + , i8, i32); @@ -1098,6 +1147,7 @@ define @intrinsic_vmul_vx_nxv4i8_nxv4i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1132,6 +1182,7 @@ entry: declare @llvm.riscv.vmul.nxv8i8.i8( , + , i8, i32); @@ -1143,6 +1194,7 @@ define @intrinsic_vmul_vx_nxv8i8_nxv8i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1177,6 +1229,7 @@ entry: declare @llvm.riscv.vmul.nxv16i8.i8( , + , i8, i32); @@ -1188,6 +1241,7 @@ define @intrinsic_vmul_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vmul.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1222,6 +1276,7 @@ entry: declare @llvm.riscv.vmul.nxv32i8.i8( , + , i8, i32); @@ -1233,6 +1288,7 @@ define @intrinsic_vmul_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vmul.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1267,6 +1323,7 @@ entry: declare @llvm.riscv.vmul.nxv64i8.i8( , + , i8, i32); @@ -1278,6 +1335,7 @@ define @intrinsic_vmul_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vmul.nxv64i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1312,6 +1370,7 @@ entry: declare @llvm.riscv.vmul.nxv1i16.i16( , + , i16, i32); @@ -1323,6 +1382,7 @@ define @intrinsic_vmul_vx_nxv1i16_nxv1i16_i16( 
@llvm.riscv.vmul.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1357,6 +1417,7 @@ entry: declare @llvm.riscv.vmul.nxv2i16.i16( , + , i16, i32); @@ -1368,6 +1429,7 @@ define @intrinsic_vmul_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vmul.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1402,6 +1464,7 @@ entry: declare @llvm.riscv.vmul.nxv4i16.i16( , + , i16, i32); @@ -1413,6 +1476,7 @@ define @intrinsic_vmul_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vmul.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1447,6 +1511,7 @@ entry: declare @llvm.riscv.vmul.nxv8i16.i16( , + , i16, i32); @@ -1458,6 +1523,7 @@ define @intrinsic_vmul_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vmul.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1492,6 +1558,7 @@ entry: declare @llvm.riscv.vmul.nxv16i16.i16( , + , i16, i32); @@ -1503,6 +1570,7 @@ define @intrinsic_vmul_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vmul.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1537,6 +1605,7 @@ entry: declare @llvm.riscv.vmul.nxv32i16.i16( , + , i16, i32); @@ -1548,6 +1617,7 @@ define @intrinsic_vmul_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vmul.nxv32i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1582,6 +1652,7 @@ entry: declare @llvm.riscv.vmul.nxv1i32.i32( , + , i32, i32); @@ -1593,6 +1664,7 @@ define @intrinsic_vmul_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vmul.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1627,6 +1699,7 @@ entry: declare @llvm.riscv.vmul.nxv2i32.i32( , + , i32, i32); @@ -1638,6 +1711,7 @@ define @intrinsic_vmul_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vmul.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1672,6 +1746,7 @@ entry: declare @llvm.riscv.vmul.nxv4i32.i32( , + , i32, i32); @@ -1683,6 +1758,7 @@ define @intrinsic_vmul_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vmul.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1717,6 +1793,7 @@ entry: declare @llvm.riscv.vmul.nxv8i32.i32( , + , i32, i32); @@ -1728,6 +1805,7 @@ define @intrinsic_vmul_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vmul.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1762,6 +1840,7 @@ entry: declare @llvm.riscv.vmul.nxv16i32.i32( , + , i32, i32); @@ -1773,6 +1852,7 @@ define @intrinsic_vmul_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vmul.nxv16i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1807,6 +1887,7 @@ entry: declare @llvm.riscv.vmul.nxv1i64.i64( , + , i64, i32); @@ -1824,6 +1905,7 @@ define @intrinsic_vmul_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vmul.nxv1i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1864,6 +1946,7 @@ entry: declare @llvm.riscv.vmul.nxv2i64.i64( , + , i64, i32); @@ -1881,6 +1964,7 @@ define @intrinsic_vmul_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vmul.nxv2i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1921,6 +2005,7 @@ entry: declare @llvm.riscv.vmul.nxv4i64.i64( , + , i64, i32); @@ -1938,6 +2023,7 @@ define @intrinsic_vmul_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vmul.nxv4i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1978,6 +2064,7 @@ entry: declare @llvm.riscv.vmul.nxv8i64.i64( , + , i64, i32); @@ -1995,6 +2082,7 @@ define @intrinsic_vmul_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vmul.nxv8i64.i64( + undef, %0, i64 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmul-rv64.ll index adf7466..28c2d77 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmul-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmul-rv64.ll @@ -4,6 +4,7 @@ declare @llvm.riscv.vmul.nxv1i8.nxv1i8( , , + , i64); define @intrinsic_vmul_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { @@ -14,6 +15,7 @@ define @intrinsic_vmul_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vmul.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -49,6 +51,7 @@ entry: 
declare @llvm.riscv.vmul.nxv2i8.nxv2i8( , , + , i64); define @intrinsic_vmul_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { @@ -59,6 +62,7 @@ define @intrinsic_vmul_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vmul.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -94,6 +98,7 @@ entry: declare @llvm.riscv.vmul.nxv4i8.nxv4i8( , , + , i64); define @intrinsic_vmul_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { @@ -104,6 +109,7 @@ define @intrinsic_vmul_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vmul.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -139,6 +145,7 @@ entry: declare @llvm.riscv.vmul.nxv8i8.nxv8i8( , , + , i64); define @intrinsic_vmul_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { @@ -149,6 +156,7 @@ define @intrinsic_vmul_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vmul.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -184,6 +192,7 @@ entry: declare @llvm.riscv.vmul.nxv16i8.nxv16i8( , , + , i64); define @intrinsic_vmul_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { @@ -194,6 +203,7 @@ define @intrinsic_vmul_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vmul.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -229,6 +239,7 @@ entry: declare @llvm.riscv.vmul.nxv32i8.nxv32i8( , , + , i64); define @intrinsic_vmul_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { @@ -239,6 +250,7 @@ define @intrinsic_vmul_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vmul.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -274,6 +286,7 @@ entry: declare @llvm.riscv.vmul.nxv64i8.nxv64i8( , , + , i64); define @intrinsic_vmul_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { @@ -284,6 +297,7 @@ define @intrinsic_vmul_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vmul.nxv64i8.nxv64i8( + undef, %0, %1, i64 %2) @@ -320,6 +334,7 @@ entry: declare @llvm.riscv.vmul.nxv1i16.nxv1i16( , , + , i64); define @intrinsic_vmul_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { @@ -330,6 +345,7 @@ define @intrinsic_vmul_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vmul.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -365,6 +381,7 @@ entry: declare @llvm.riscv.vmul.nxv2i16.nxv2i16( , , + , i64); define @intrinsic_vmul_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { @@ -375,6 +392,7 @@ define @intrinsic_vmul_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vmul.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -410,6 +428,7 @@ entry: declare @llvm.riscv.vmul.nxv4i16.nxv4i16( , , + , i64); define @intrinsic_vmul_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { @@ -420,6 +439,7 @@ define @intrinsic_vmul_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vmul.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -455,6 +475,7 @@ entry: declare @llvm.riscv.vmul.nxv8i16.nxv8i16( , , + , i64); define @intrinsic_vmul_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { @@ -465,6 +486,7 @@ define @intrinsic_vmul_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vmul.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vmul.nxv16i16.nxv16i16( , , + , i64); define @intrinsic_vmul_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { @@ -510,6 +533,7 @@ define @intrinsic_vmul_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vmul.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vmul.nxv32i16.nxv32i16( , , + , i64); define @intrinsic_vmul_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { @@ -555,6 +580,7 @@ define @intrinsic_vmul_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vmul.nxv32i16.nxv32i16( + undef, %0, %1, i64 %2) @@ -591,6 +617,7 @@ entry: declare @llvm.riscv.vmul.nxv1i32.nxv1i32( , , + , i64); define 
@intrinsic_vmul_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { @@ -601,6 +628,7 @@ define @intrinsic_vmul_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vmul.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -636,6 +664,7 @@ entry: declare @llvm.riscv.vmul.nxv2i32.nxv2i32( , , + , i64); define @intrinsic_vmul_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { @@ -646,6 +675,7 @@ define @intrinsic_vmul_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vmul.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vmul.nxv4i32.nxv4i32( , , + , i64); define @intrinsic_vmul_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { @@ -691,6 +722,7 @@ define @intrinsic_vmul_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vmul.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vmul.nxv8i32.nxv8i32( , , + , i64); define @intrinsic_vmul_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { @@ -736,6 +769,7 @@ define @intrinsic_vmul_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vmul.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vmul.nxv16i32.nxv16i32( , , + , i64); define @intrinsic_vmul_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { @@ -781,6 +816,7 @@ define @intrinsic_vmul_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vmul.nxv16i32.nxv16i32( + undef, %0, %1, i64 %2) @@ -817,6 +853,7 @@ entry: declare @llvm.riscv.vmul.nxv1i64.nxv1i64( , , + , i64); define @intrinsic_vmul_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { @@ -827,6 +864,7 @@ define @intrinsic_vmul_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vmul.nxv1i64.nxv1i64( + undef, %0, %1, i64 %2) @@ -862,6 +900,7 @@ entry: declare @llvm.riscv.vmul.nxv2i64.nxv2i64( , , + , i64); define @intrinsic_vmul_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { @@ -872,6 +911,7 @@ define @intrinsic_vmul_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vmul.nxv2i64.nxv2i64( + undef, %0, %1, i64 %2) @@ -907,6 +947,7 @@ entry: declare @llvm.riscv.vmul.nxv4i64.nxv4i64( , , + , i64); define @intrinsic_vmul_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { @@ -917,6 +958,7 @@ define @intrinsic_vmul_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vmul.nxv4i64.nxv4i64( + undef, %0, %1, i64 %2) @@ -952,6 +994,7 @@ entry: declare @llvm.riscv.vmul.nxv8i64.nxv8i64( , , + , i64); define @intrinsic_vmul_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { @@ -962,6 +1005,7 @@ define @intrinsic_vmul_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vmul.nxv8i64.nxv8i64( + undef, %0, %1, i64 %2) @@ -997,6 +1041,7 @@ entry: declare @llvm.riscv.vmul.nxv1i8.i8( , + , i8, i64); @@ -1008,6 +1053,7 @@ define @intrinsic_vmul_vx_nxv1i8_nxv1i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1042,6 +1088,7 @@ entry: declare @llvm.riscv.vmul.nxv2i8.i8( , + , i8, i64); @@ -1053,6 +1100,7 @@ define @intrinsic_vmul_vx_nxv2i8_nxv2i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1087,6 +1135,7 @@ entry: declare @llvm.riscv.vmul.nxv4i8.i8( , + , i8, i64); @@ -1098,6 +1147,7 @@ define @intrinsic_vmul_vx_nxv4i8_nxv4i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1132,6 +1182,7 @@ entry: declare @llvm.riscv.vmul.nxv8i8.i8( , + , i8, i64); @@ -1143,6 +1194,7 @@ define @intrinsic_vmul_vx_nxv8i8_nxv8i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1177,6 +1229,7 @@ entry: declare 
@llvm.riscv.vmul.nxv16i8.i8( , + , i8, i64); @@ -1188,6 +1241,7 @@ define @intrinsic_vmul_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vmul.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1222,6 +1276,7 @@ entry: declare @llvm.riscv.vmul.nxv32i8.i8( , + , i8, i64); @@ -1233,6 +1288,7 @@ define @intrinsic_vmul_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vmul.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1267,6 +1323,7 @@ entry: declare @llvm.riscv.vmul.nxv64i8.i8( , + , i8, i64); @@ -1278,6 +1335,7 @@ define @intrinsic_vmul_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vmul.nxv64i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1312,6 +1370,7 @@ entry: declare @llvm.riscv.vmul.nxv1i16.i16( , + , i16, i64); @@ -1323,6 +1382,7 @@ define @intrinsic_vmul_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vmul.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1357,6 +1417,7 @@ entry: declare @llvm.riscv.vmul.nxv2i16.i16( , + , i16, i64); @@ -1368,6 +1429,7 @@ define @intrinsic_vmul_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vmul.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1402,6 +1464,7 @@ entry: declare @llvm.riscv.vmul.nxv4i16.i16( , + , i16, i64); @@ -1413,6 +1476,7 @@ define @intrinsic_vmul_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vmul.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1447,6 +1511,7 @@ entry: declare @llvm.riscv.vmul.nxv8i16.i16( , + , i16, i64); @@ -1458,6 +1523,7 @@ define @intrinsic_vmul_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vmul.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1492,6 +1558,7 @@ entry: declare @llvm.riscv.vmul.nxv16i16.i16( , + , i16, i64); @@ -1503,6 +1570,7 @@ define @intrinsic_vmul_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vmul.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1537,6 +1605,7 @@ entry: declare @llvm.riscv.vmul.nxv32i16.i16( , + , i16, i64); @@ -1548,6 +1617,7 @@ define @intrinsic_vmul_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vmul.nxv32i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1582,6 +1652,7 @@ entry: declare @llvm.riscv.vmul.nxv1i32.i32( , + , i32, i64); @@ -1593,6 +1664,7 @@ define @intrinsic_vmul_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vmul.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1627,6 +1699,7 @@ entry: declare @llvm.riscv.vmul.nxv2i32.i32( , + , i32, i64); @@ -1638,6 +1711,7 @@ define @intrinsic_vmul_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vmul.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1672,6 +1746,7 @@ entry: declare @llvm.riscv.vmul.nxv4i32.i32( , + , i32, i64); @@ -1683,6 +1758,7 @@ define @intrinsic_vmul_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vmul.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1717,6 +1793,7 @@ entry: declare @llvm.riscv.vmul.nxv8i32.i32( , + , i32, i64); @@ -1728,6 +1805,7 @@ define @intrinsic_vmul_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vmul.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1762,6 +1840,7 @@ entry: declare @llvm.riscv.vmul.nxv16i32.i32( , + , i32, i64); @@ -1773,6 +1852,7 @@ define @intrinsic_vmul_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vmul.nxv16i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1807,6 +1887,7 @@ entry: declare @llvm.riscv.vmul.nxv1i64.i64( , + , i64, i64); @@ -1818,6 +1899,7 @@ define @intrinsic_vmul_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vmul.nxv1i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1852,6 +1934,7 @@ entry: declare @llvm.riscv.vmul.nxv2i64.i64( , + , i64, i64); @@ -1863,6 +1946,7 @@ define @intrinsic_vmul_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vmul.nxv2i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1897,6 +1981,7 @@ entry: declare @llvm.riscv.vmul.nxv4i64.i64( , + , i64, i64); @@ -1908,6 +1993,7 @@ define @intrinsic_vmul_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vmul.nxv4i64.i64( + undef, %0, i64 
%1, i64 %2) @@ -1942,6 +2028,7 @@ entry: declare @llvm.riscv.vmul.nxv8i64.i64( , + , i64, i64); @@ -1953,6 +2040,7 @@ define @intrinsic_vmul_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vmul.nxv8i64.i64( + undef, %0, i64 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll index 51e254f..4bd0f83 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll @@ -4,6 +4,7 @@ declare @llvm.riscv.vmulh.nxv1i8.nxv1i8( , , + , i32); define @intrinsic_vmulh_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { @@ -14,6 +15,7 @@ define @intrinsic_vmulh_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vmulh.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -49,6 +51,7 @@ entry: declare @llvm.riscv.vmulh.nxv2i8.nxv2i8( , , + , i32); define @intrinsic_vmulh_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { @@ -59,6 +62,7 @@ define @intrinsic_vmulh_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vmulh.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -94,6 +98,7 @@ entry: declare @llvm.riscv.vmulh.nxv4i8.nxv4i8( , , + , i32); define @intrinsic_vmulh_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { @@ -104,6 +109,7 @@ define @intrinsic_vmulh_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vmulh.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -139,6 +145,7 @@ entry: declare @llvm.riscv.vmulh.nxv8i8.nxv8i8( , , + , i32); define @intrinsic_vmulh_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { @@ -149,6 +156,7 @@ define @intrinsic_vmulh_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vmulh.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -184,6 +192,7 @@ entry: declare @llvm.riscv.vmulh.nxv16i8.nxv16i8( , , + , i32); define @intrinsic_vmulh_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { @@ -194,6 +203,7 @@ define @intrinsic_vmulh_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vmulh.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -229,6 +239,7 @@ entry: declare @llvm.riscv.vmulh.nxv32i8.nxv32i8( , , + , i32); define @intrinsic_vmulh_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { @@ -239,6 +250,7 @@ define @intrinsic_vmulh_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vmulh.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -274,6 +286,7 @@ entry: declare @llvm.riscv.vmulh.nxv64i8.nxv64i8( , , + , i32); define @intrinsic_vmulh_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { @@ -284,6 +297,7 @@ define @intrinsic_vmulh_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vmulh.nxv64i8.nxv64i8( + undef, %0, %1, i32 %2) @@ -320,6 +334,7 @@ entry: declare @llvm.riscv.vmulh.nxv1i16.nxv1i16( , , + , i32); define @intrinsic_vmulh_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { @@ -330,6 +345,7 @@ define @intrinsic_vmulh_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vmulh.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -365,6 +381,7 @@ entry: declare @llvm.riscv.vmulh.nxv2i16.nxv2i16( , , + , i32); define @intrinsic_vmulh_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { @@ -375,6 +392,7 @@ define @intrinsic_vmulh_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vmulh.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -410,6 +428,7 @@ entry: declare @llvm.riscv.vmulh.nxv4i16.nxv4i16( , , + , i32); define @intrinsic_vmulh_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { @@ -420,6 +439,7 @@ define @intrinsic_vmulh_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vmulh.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -455,6 +475,7 @@ entry: declare @llvm.riscv.vmulh.nxv8i16.nxv8i16( , , + , i32); define @intrinsic_vmulh_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { @@ -465,6 +486,7 @@ define 
@intrinsic_vmulh_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vmulh.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vmulh.nxv16i16.nxv16i16( , , + , i32); define @intrinsic_vmulh_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { @@ -510,6 +533,7 @@ define @intrinsic_vmulh_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vmulh.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vmulh.nxv32i16.nxv32i16( , , + , i32); define @intrinsic_vmulh_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { @@ -555,6 +580,7 @@ define @intrinsic_vmulh_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vmulh.nxv32i16.nxv32i16( + undef, %0, %1, i32 %2) @@ -591,6 +617,7 @@ entry: declare @llvm.riscv.vmulh.nxv1i32.nxv1i32( , , + , i32); define @intrinsic_vmulh_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { @@ -601,6 +628,7 @@ define @intrinsic_vmulh_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vmulh.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -636,6 +664,7 @@ entry: declare @llvm.riscv.vmulh.nxv2i32.nxv2i32( , , + , i32); define @intrinsic_vmulh_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { @@ -646,6 +675,7 @@ define @intrinsic_vmulh_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vmulh.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vmulh.nxv4i32.nxv4i32( , , + , i32); define @intrinsic_vmulh_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { @@ -691,6 +722,7 @@ define @intrinsic_vmulh_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vmulh.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vmulh.nxv8i32.nxv8i32( , , + , i32); define @intrinsic_vmulh_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { @@ -736,6 +769,7 @@ define @intrinsic_vmulh_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vmulh.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vmulh.nxv16i32.nxv16i32( , , + , i32); define @intrinsic_vmulh_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { @@ -781,6 +816,7 @@ define @intrinsic_vmulh_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vmulh.nxv16i32.nxv16i32( + undef, %0, %1, i32 %2) @@ -817,6 +853,7 @@ entry: declare @llvm.riscv.vmulh.nxv1i64.nxv1i64( , , + , i32); define @intrinsic_vmulh_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { @@ -827,6 +864,7 @@ define @intrinsic_vmulh_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vmulh.nxv1i64.nxv1i64( + undef, %0, %1, i32 %2) @@ -862,6 +900,7 @@ entry: declare @llvm.riscv.vmulh.nxv2i64.nxv2i64( , , + , i32); define @intrinsic_vmulh_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { @@ -872,6 +911,7 @@ define @intrinsic_vmulh_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vmulh.nxv2i64.nxv2i64( + undef, %0, %1, i32 %2) @@ -907,6 +947,7 @@ entry: declare @llvm.riscv.vmulh.nxv4i64.nxv4i64( , , + , i32); define @intrinsic_vmulh_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { @@ -917,6 +958,7 @@ define @intrinsic_vmulh_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vmulh.nxv4i64.nxv4i64( + undef, %0, %1, i32 %2) @@ -952,6 +994,7 @@ entry: declare @llvm.riscv.vmulh.nxv8i64.nxv8i64( , , + , i32); define @intrinsic_vmulh_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { @@ -962,6 +1005,7 @@ define @intrinsic_vmulh_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vmulh.nxv8i64.nxv8i64( + undef, %0, %1, i32 %2) @@ -997,6 +1041,7 @@ entry: declare @llvm.riscv.vmulh.nxv1i8.i8( , + , i8, i32); @@ -1008,6 +1053,7 @@ define @intrinsic_vmulh_vx_nxv1i8_nxv1i8_i8( ; CHECK-NEXT: ret entry: 
%a = call @llvm.riscv.vmulh.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1042,6 +1088,7 @@ entry: declare @llvm.riscv.vmulh.nxv2i8.i8( , + , i8, i32); @@ -1053,6 +1100,7 @@ define @intrinsic_vmulh_vx_nxv2i8_nxv2i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1087,6 +1135,7 @@ entry: declare @llvm.riscv.vmulh.nxv4i8.i8( , + , i8, i32); @@ -1098,6 +1147,7 @@ define @intrinsic_vmulh_vx_nxv4i8_nxv4i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1132,6 +1182,7 @@ entry: declare @llvm.riscv.vmulh.nxv8i8.i8( , + , i8, i32); @@ -1143,6 +1194,7 @@ define @intrinsic_vmulh_vx_nxv8i8_nxv8i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1177,6 +1229,7 @@ entry: declare @llvm.riscv.vmulh.nxv16i8.i8( , + , i8, i32); @@ -1188,6 +1241,7 @@ define @intrinsic_vmulh_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vmulh.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1222,6 +1276,7 @@ entry: declare @llvm.riscv.vmulh.nxv32i8.i8( , + , i8, i32); @@ -1233,6 +1288,7 @@ define @intrinsic_vmulh_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vmulh.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1267,6 +1323,7 @@ entry: declare @llvm.riscv.vmulh.nxv64i8.i8( , + , i8, i32); @@ -1278,6 +1335,7 @@ define @intrinsic_vmulh_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vmulh.nxv64i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1312,6 +1370,7 @@ entry: declare @llvm.riscv.vmulh.nxv1i16.i16( , + , i16, i32); @@ -1323,6 +1382,7 @@ define @intrinsic_vmulh_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vmulh.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1357,6 +1417,7 @@ entry: declare @llvm.riscv.vmulh.nxv2i16.i16( , + , i16, i32); @@ -1368,6 +1429,7 @@ define @intrinsic_vmulh_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vmulh.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1402,6 +1464,7 @@ entry: declare @llvm.riscv.vmulh.nxv4i16.i16( , + , i16, i32); @@ -1413,6 +1476,7 @@ define @intrinsic_vmulh_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vmulh.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1447,6 +1511,7 @@ entry: declare @llvm.riscv.vmulh.nxv8i16.i16( , + , i16, i32); @@ -1458,6 +1523,7 @@ define @intrinsic_vmulh_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vmulh.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1492,6 +1558,7 @@ entry: declare @llvm.riscv.vmulh.nxv16i16.i16( , + , i16, i32); @@ -1503,6 +1570,7 @@ define @intrinsic_vmulh_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vmulh.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1537,6 +1605,7 @@ entry: declare @llvm.riscv.vmulh.nxv32i16.i16( , + , i16, i32); @@ -1548,6 +1617,7 @@ define @intrinsic_vmulh_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vmulh.nxv32i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1582,6 +1652,7 @@ entry: declare @llvm.riscv.vmulh.nxv1i32.i32( , + , i32, i32); @@ -1593,6 +1664,7 @@ define @intrinsic_vmulh_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vmulh.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1627,6 +1699,7 @@ entry: declare @llvm.riscv.vmulh.nxv2i32.i32( , + , i32, i32); @@ -1638,6 +1711,7 @@ define @intrinsic_vmulh_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vmulh.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1672,6 +1746,7 @@ entry: declare @llvm.riscv.vmulh.nxv4i32.i32( , + , i32, i32); @@ -1683,6 +1758,7 @@ define @intrinsic_vmulh_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vmulh.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1717,6 +1793,7 @@ entry: declare @llvm.riscv.vmulh.nxv8i32.i32( , + , i32, i32); @@ -1728,6 +1805,7 @@ define @intrinsic_vmulh_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vmulh.nxv8i32.i32( + 
undef, %0, i32 %1, i32 %2) @@ -1762,6 +1840,7 @@ entry: declare @llvm.riscv.vmulh.nxv16i32.i32( , + , i32, i32); @@ -1773,6 +1852,7 @@ define @intrinsic_vmulh_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vmulh.nxv16i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1807,6 +1887,7 @@ entry: declare @llvm.riscv.vmulh.nxv1i64.i64( , + , i64, i32); @@ -1824,6 +1905,7 @@ define @intrinsic_vmulh_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vmulh.nxv1i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1864,6 +1946,7 @@ entry: declare @llvm.riscv.vmulh.nxv2i64.i64( , + , i64, i32); @@ -1881,6 +1964,7 @@ define @intrinsic_vmulh_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vmulh.nxv2i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1921,6 +2005,7 @@ entry: declare @llvm.riscv.vmulh.nxv4i64.i64( , + , i64, i32); @@ -1938,6 +2023,7 @@ define @intrinsic_vmulh_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vmulh.nxv4i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1978,6 +2064,7 @@ entry: declare @llvm.riscv.vmulh.nxv8i64.i64( , + , i64, i32); @@ -1995,6 +2082,7 @@ define @intrinsic_vmulh_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vmulh.nxv8i64.i64( + undef, %0, i64 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulh-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmulh-rv64.ll index 9242b62..2fd9384 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmulh-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmulh-rv64.ll @@ -4,6 +4,7 @@ declare @llvm.riscv.vmulh.nxv1i8.nxv1i8( , , + , i64); define @intrinsic_vmulh_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { @@ -14,6 +15,7 @@ define @intrinsic_vmulh_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vmulh.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -49,6 +51,7 @@ entry: declare @llvm.riscv.vmulh.nxv2i8.nxv2i8( , , + , i64); define @intrinsic_vmulh_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { @@ -59,6 +62,7 @@ define @intrinsic_vmulh_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vmulh.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -94,6 +98,7 @@ entry: declare @llvm.riscv.vmulh.nxv4i8.nxv4i8( , , + , i64); define @intrinsic_vmulh_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { @@ -104,6 +109,7 @@ define @intrinsic_vmulh_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vmulh.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -139,6 +145,7 @@ entry: declare @llvm.riscv.vmulh.nxv8i8.nxv8i8( , , + , i64); define @intrinsic_vmulh_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { @@ -149,6 +156,7 @@ define @intrinsic_vmulh_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vmulh.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -184,6 +192,7 @@ entry: declare @llvm.riscv.vmulh.nxv16i8.nxv16i8( , , + , i64); define @intrinsic_vmulh_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { @@ -194,6 +203,7 @@ define @intrinsic_vmulh_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vmulh.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -229,6 +239,7 @@ entry: declare @llvm.riscv.vmulh.nxv32i8.nxv32i8( , , + , i64); define @intrinsic_vmulh_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { @@ -239,6 +250,7 @@ define @intrinsic_vmulh_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vmulh.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -274,6 +286,7 @@ entry: declare @llvm.riscv.vmulh.nxv64i8.nxv64i8( , , + , i64); define @intrinsic_vmulh_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { @@ -284,6 +297,7 @@ define @intrinsic_vmulh_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vmulh.nxv64i8.nxv64i8( + undef, %0, %1, i64 %2) @@ -320,6 +334,7 @@ entry: declare @llvm.riscv.vmulh.nxv1i16.nxv1i16( , , + , i64); define @intrinsic_vmulh_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { @@ -330,6 +345,7 @@ define 
@intrinsic_vmulh_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vmulh.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -365,6 +381,7 @@ entry: declare @llvm.riscv.vmulh.nxv2i16.nxv2i16( , , + , i64); define @intrinsic_vmulh_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { @@ -375,6 +392,7 @@ define @intrinsic_vmulh_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vmulh.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -410,6 +428,7 @@ entry: declare @llvm.riscv.vmulh.nxv4i16.nxv4i16( , , + , i64); define @intrinsic_vmulh_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { @@ -420,6 +439,7 @@ define @intrinsic_vmulh_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vmulh.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -455,6 +475,7 @@ entry: declare @llvm.riscv.vmulh.nxv8i16.nxv8i16( , , + , i64); define @intrinsic_vmulh_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { @@ -465,6 +486,7 @@ define @intrinsic_vmulh_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vmulh.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vmulh.nxv16i16.nxv16i16( , , + , i64); define @intrinsic_vmulh_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { @@ -510,6 +533,7 @@ define @intrinsic_vmulh_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vmulh.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vmulh.nxv32i16.nxv32i16( , , + , i64); define @intrinsic_vmulh_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { @@ -555,6 +580,7 @@ define @intrinsic_vmulh_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vmulh.nxv32i16.nxv32i16( + undef, %0, %1, i64 %2) @@ -591,6 +617,7 @@ entry: declare @llvm.riscv.vmulh.nxv1i32.nxv1i32( , , + , i64); define @intrinsic_vmulh_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { @@ -601,6 +628,7 @@ define @intrinsic_vmulh_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vmulh.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -636,6 +664,7 @@ entry: declare @llvm.riscv.vmulh.nxv2i32.nxv2i32( , , + , i64); define @intrinsic_vmulh_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { @@ -646,6 +675,7 @@ define @intrinsic_vmulh_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vmulh.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vmulh.nxv4i32.nxv4i32( , , + , i64); define @intrinsic_vmulh_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { @@ -691,6 +722,7 @@ define @intrinsic_vmulh_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vmulh.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vmulh.nxv8i32.nxv8i32( , , + , i64); define @intrinsic_vmulh_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { @@ -736,6 +769,7 @@ define @intrinsic_vmulh_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vmulh.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vmulh.nxv16i32.nxv16i32( , , + , i64); define @intrinsic_vmulh_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { @@ -781,6 +816,7 @@ define @intrinsic_vmulh_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vmulh.nxv16i32.nxv16i32( + undef, %0, %1, i64 %2) @@ -817,6 +853,7 @@ entry: declare @llvm.riscv.vmulh.nxv1i64.nxv1i64( , , + , i64); define @intrinsic_vmulh_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { @@ -827,6 +864,7 @@ define @intrinsic_vmulh_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vmulh.nxv1i64.nxv1i64( + undef, %0, %1, i64 %2) @@ -862,6 +900,7 @@ entry: declare @llvm.riscv.vmulh.nxv2i64.nxv2i64( , , + , i64); define @intrinsic_vmulh_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { @@ -872,6 
+911,7 @@ define @intrinsic_vmulh_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vmulh.nxv2i64.nxv2i64( + undef, %0, %1, i64 %2) @@ -907,6 +947,7 @@ entry: declare @llvm.riscv.vmulh.nxv4i64.nxv4i64( , , + , i64); define @intrinsic_vmulh_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { @@ -917,6 +958,7 @@ define @intrinsic_vmulh_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vmulh.nxv4i64.nxv4i64( + undef, %0, %1, i64 %2) @@ -952,6 +994,7 @@ entry: declare @llvm.riscv.vmulh.nxv8i64.nxv8i64( , , + , i64); define @intrinsic_vmulh_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { @@ -962,6 +1005,7 @@ define @intrinsic_vmulh_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vmulh.nxv8i64.nxv8i64( + undef, %0, %1, i64 %2) @@ -997,6 +1041,7 @@ entry: declare @llvm.riscv.vmulh.nxv1i8.i8( , + , i8, i64); @@ -1008,6 +1053,7 @@ define @intrinsic_vmulh_vx_nxv1i8_nxv1i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1042,6 +1088,7 @@ entry: declare @llvm.riscv.vmulh.nxv2i8.i8( , + , i8, i64); @@ -1053,6 +1100,7 @@ define @intrinsic_vmulh_vx_nxv2i8_nxv2i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1087,6 +1135,7 @@ entry: declare @llvm.riscv.vmulh.nxv4i8.i8( , + , i8, i64); @@ -1098,6 +1147,7 @@ define @intrinsic_vmulh_vx_nxv4i8_nxv4i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1132,6 +1182,7 @@ entry: declare @llvm.riscv.vmulh.nxv8i8.i8( , + , i8, i64); @@ -1143,6 +1194,7 @@ define @intrinsic_vmulh_vx_nxv8i8_nxv8i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1177,6 +1229,7 @@ entry: declare @llvm.riscv.vmulh.nxv16i8.i8( , + , i8, i64); @@ -1188,6 +1241,7 @@ define @intrinsic_vmulh_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vmulh.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1222,6 +1276,7 @@ entry: declare @llvm.riscv.vmulh.nxv32i8.i8( , + , i8, i64); @@ -1233,6 +1288,7 @@ define @intrinsic_vmulh_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vmulh.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1267,6 +1323,7 @@ entry: declare @llvm.riscv.vmulh.nxv64i8.i8( , + , i8, i64); @@ -1278,6 +1335,7 @@ define @intrinsic_vmulh_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vmulh.nxv64i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1312,6 +1370,7 @@ entry: declare @llvm.riscv.vmulh.nxv1i16.i16( , + , i16, i64); @@ -1323,6 +1382,7 @@ define @intrinsic_vmulh_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vmulh.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1357,6 +1417,7 @@ entry: declare @llvm.riscv.vmulh.nxv2i16.i16( , + , i16, i64); @@ -1368,6 +1429,7 @@ define @intrinsic_vmulh_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vmulh.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1402,6 +1464,7 @@ entry: declare @llvm.riscv.vmulh.nxv4i16.i16( , + , i16, i64); @@ -1413,6 +1476,7 @@ define @intrinsic_vmulh_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vmulh.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1447,6 +1511,7 @@ entry: declare @llvm.riscv.vmulh.nxv8i16.i16( , + , i16, i64); @@ -1458,6 +1523,7 @@ define @intrinsic_vmulh_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vmulh.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1492,6 +1558,7 @@ entry: declare @llvm.riscv.vmulh.nxv16i16.i16( , + , i16, i64); @@ -1503,6 +1570,7 @@ define @intrinsic_vmulh_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vmulh.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1537,6 +1605,7 @@ entry: declare @llvm.riscv.vmulh.nxv32i16.i16( , + , i16, i64); @@ -1548,6 +1617,7 @@ define @intrinsic_vmulh_vx_nxv32i16_nxv32i16_i16( 
@llvm.riscv.vmulh.nxv32i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1582,6 +1652,7 @@ entry: declare @llvm.riscv.vmulh.nxv1i32.i32( , + , i32, i64); @@ -1593,6 +1664,7 @@ define @intrinsic_vmulh_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vmulh.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1627,6 +1699,7 @@ entry: declare @llvm.riscv.vmulh.nxv2i32.i32( , + , i32, i64); @@ -1638,6 +1711,7 @@ define @intrinsic_vmulh_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vmulh.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1672,6 +1746,7 @@ entry: declare @llvm.riscv.vmulh.nxv4i32.i32( , + , i32, i64); @@ -1683,6 +1758,7 @@ define @intrinsic_vmulh_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vmulh.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1717,6 +1793,7 @@ entry: declare @llvm.riscv.vmulh.nxv8i32.i32( , + , i32, i64); @@ -1728,6 +1805,7 @@ define @intrinsic_vmulh_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vmulh.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1762,6 +1840,7 @@ entry: declare @llvm.riscv.vmulh.nxv16i32.i32( , + , i32, i64); @@ -1773,6 +1852,7 @@ define @intrinsic_vmulh_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vmulh.nxv16i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1807,6 +1887,7 @@ entry: declare @llvm.riscv.vmulh.nxv1i64.i64( , + , i64, i64); @@ -1818,6 +1899,7 @@ define @intrinsic_vmulh_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vmulh.nxv1i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1852,6 +1934,7 @@ entry: declare @llvm.riscv.vmulh.nxv2i64.i64( , + , i64, i64); @@ -1863,6 +1946,7 @@ define @intrinsic_vmulh_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vmulh.nxv2i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1897,6 +1981,7 @@ entry: declare @llvm.riscv.vmulh.nxv4i64.i64( , + , i64, i64); @@ -1908,6 +1993,7 @@ define @intrinsic_vmulh_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vmulh.nxv4i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1942,6 +2028,7 @@ entry: declare @llvm.riscv.vmulh.nxv8i64.i64( , + , i64, i64); @@ -1953,6 +2040,7 @@ define @intrinsic_vmulh_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vmulh.nxv8i64.i64( + undef, %0, i64 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll index 068a53f..28afafa 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll @@ -4,6 +4,7 @@ declare @llvm.riscv.vmulhsu.nxv1i8.nxv1i8( , , + , i32); define @intrinsic_vmulhsu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { @@ -14,6 +15,7 @@ define @intrinsic_vmulhsu_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vmulhsu.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -49,6 +51,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv2i8.nxv2i8( , , + , i32); define @intrinsic_vmulhsu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { @@ -59,6 +62,7 @@ define @intrinsic_vmulhsu_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vmulhsu.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -94,6 +98,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv4i8.nxv4i8( , , + , i32); define @intrinsic_vmulhsu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { @@ -104,6 +109,7 @@ define @intrinsic_vmulhsu_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vmulhsu.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -139,6 +145,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv8i8.nxv8i8( , , + , i32); define @intrinsic_vmulhsu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { @@ -149,6 +156,7 @@ define @intrinsic_vmulhsu_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vmulhsu.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -184,6 +192,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv16i8.nxv16i8( , , + , i32); define @intrinsic_vmulhsu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { 
@@ -194,6 +203,7 @@ define @intrinsic_vmulhsu_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vmulhsu.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -229,6 +239,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv32i8.nxv32i8( , , + , i32); define @intrinsic_vmulhsu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { @@ -239,6 +250,7 @@ define @intrinsic_vmulhsu_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vmulhsu.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -274,6 +286,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv64i8.nxv64i8( , , + , i32); define @intrinsic_vmulhsu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { @@ -284,6 +297,7 @@ define @intrinsic_vmulhsu_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vmulhsu.nxv64i8.nxv64i8( + undef, %0, %1, i32 %2) @@ -320,6 +334,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv1i16.nxv1i16( , , + , i32); define @intrinsic_vmulhsu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { @@ -330,6 +345,7 @@ define @intrinsic_vmulhsu_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vmulhsu.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -365,6 +381,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv2i16.nxv2i16( , , + , i32); define @intrinsic_vmulhsu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { @@ -375,6 +392,7 @@ define @intrinsic_vmulhsu_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vmulhsu.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -410,6 +428,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv4i16.nxv4i16( , , + , i32); define @intrinsic_vmulhsu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { @@ -420,6 +439,7 @@ define @intrinsic_vmulhsu_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vmulhsu.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -455,6 +475,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv8i16.nxv8i16( , , + , i32); define @intrinsic_vmulhsu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { @@ -465,6 +486,7 @@ define @intrinsic_vmulhsu_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vmulhsu.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv16i16.nxv16i16( , , + , i32); define @intrinsic_vmulhsu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { @@ -510,6 +533,7 @@ define @intrinsic_vmulhsu_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vmulhsu.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv32i16.nxv32i16( , , + , i32); define @intrinsic_vmulhsu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { @@ -555,6 +580,7 @@ define @intrinsic_vmulhsu_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vmulhsu.nxv32i16.nxv32i16( + undef, %0, %1, i32 %2) @@ -591,6 +617,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv1i32.nxv1i32( , , + , i32); define @intrinsic_vmulhsu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { @@ -601,6 +628,7 @@ define @intrinsic_vmulhsu_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vmulhsu.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -636,6 +664,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv2i32.nxv2i32( , , + , i32); define @intrinsic_vmulhsu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { @@ -646,6 +675,7 @@ define @intrinsic_vmulhsu_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vmulhsu.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv4i32.nxv4i32( , , + , i32); define @intrinsic_vmulhsu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { @@ -691,6 +722,7 @@ define @intrinsic_vmulhsu_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vmulhsu.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -726,6 +758,7 @@ entry: declare 
@llvm.riscv.vmulhsu.nxv8i32.nxv8i32( , , + , i32); define @intrinsic_vmulhsu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { @@ -736,6 +769,7 @@ define @intrinsic_vmulhsu_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vmulhsu.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv16i32.nxv16i32( , , + , i32); define @intrinsic_vmulhsu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { @@ -781,6 +816,7 @@ define @intrinsic_vmulhsu_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vmulhsu.nxv16i32.nxv16i32( + undef, %0, %1, i32 %2) @@ -817,6 +853,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv1i64.nxv1i64( , , + , i32); define @intrinsic_vmulhsu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { @@ -827,6 +864,7 @@ define @intrinsic_vmulhsu_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vmulhsu.nxv1i64.nxv1i64( + undef, %0, %1, i32 %2) @@ -862,6 +900,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv2i64.nxv2i64( , , + , i32); define @intrinsic_vmulhsu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { @@ -872,6 +911,7 @@ define @intrinsic_vmulhsu_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vmulhsu.nxv2i64.nxv2i64( + undef, %0, %1, i32 %2) @@ -907,6 +947,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv4i64.nxv4i64( , , + , i32); define @intrinsic_vmulhsu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { @@ -917,6 +958,7 @@ define @intrinsic_vmulhsu_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vmulhsu.nxv4i64.nxv4i64( + undef, %0, %1, i32 %2) @@ -952,6 +994,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv8i64.nxv8i64( , , + , i32); define @intrinsic_vmulhsu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { @@ -962,6 +1005,7 @@ define @intrinsic_vmulhsu_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vmulhsu.nxv8i64.nxv8i64( + undef, %0, %1, i32 %2) @@ -997,6 +1041,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv1i8.i8( , + , i8, i32); @@ -1008,6 +1053,7 @@ define @intrinsic_vmulhsu_vx_nxv1i8_nxv1i8_i8( @llvm.riscv.vmulhsu.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1042,6 +1088,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv2i8.i8( , + , i8, i32); @@ -1053,6 +1100,7 @@ define @intrinsic_vmulhsu_vx_nxv2i8_nxv2i8_i8( @llvm.riscv.vmulhsu.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1087,6 +1135,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv4i8.i8( , + , i8, i32); @@ -1098,6 +1147,7 @@ define @intrinsic_vmulhsu_vx_nxv4i8_nxv4i8_i8( @llvm.riscv.vmulhsu.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1132,6 +1182,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv8i8.i8( , + , i8, i32); @@ -1143,6 +1194,7 @@ define @intrinsic_vmulhsu_vx_nxv8i8_nxv8i8_i8( @llvm.riscv.vmulhsu.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1177,6 +1229,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv16i8.i8( , + , i8, i32); @@ -1188,6 +1241,7 @@ define @intrinsic_vmulhsu_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vmulhsu.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1222,6 +1276,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv32i8.i8( , + , i8, i32); @@ -1233,6 +1288,7 @@ define @intrinsic_vmulhsu_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vmulhsu.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1267,6 +1323,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv64i8.i8( , + , i8, i32); @@ -1278,6 +1335,7 @@ define @intrinsic_vmulhsu_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vmulhsu.nxv64i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1312,6 +1370,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv1i16.i16( , + , i16, i32); @@ -1323,6 +1382,7 @@ define @intrinsic_vmulhsu_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vmulhsu.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1357,6 +1417,7 @@ entry: declare 
@llvm.riscv.vmulhsu.nxv2i16.i16( , + , i16, i32); @@ -1368,6 +1429,7 @@ define @intrinsic_vmulhsu_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vmulhsu.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1402,6 +1464,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv4i16.i16( , + , i16, i32); @@ -1413,6 +1476,7 @@ define @intrinsic_vmulhsu_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vmulhsu.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1447,6 +1511,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv8i16.i16( , + , i16, i32); @@ -1458,6 +1523,7 @@ define @intrinsic_vmulhsu_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vmulhsu.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1492,6 +1558,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv16i16.i16( , + , i16, i32); @@ -1503,6 +1570,7 @@ define @intrinsic_vmulhsu_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vmulhsu.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1537,6 +1605,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv32i16.i16( , + , i16, i32); @@ -1548,6 +1617,7 @@ define @intrinsic_vmulhsu_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vmulhsu.nxv32i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1582,6 +1652,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv1i32.i32( , + , i32, i32); @@ -1593,6 +1664,7 @@ define @intrinsic_vmulhsu_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vmulhsu.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1627,6 +1699,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv2i32.i32( , + , i32, i32); @@ -1638,6 +1711,7 @@ define @intrinsic_vmulhsu_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vmulhsu.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1672,6 +1746,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv4i32.i32( , + , i32, i32); @@ -1683,6 +1758,7 @@ define @intrinsic_vmulhsu_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vmulhsu.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1717,6 +1793,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv8i32.i32( , + , i32, i32); @@ -1728,6 +1805,7 @@ define @intrinsic_vmulhsu_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vmulhsu.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1762,6 +1840,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv16i32.i32( , + , i32, i32); @@ -1773,6 +1852,7 @@ define @intrinsic_vmulhsu_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vmulhsu.nxv16i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1807,6 +1887,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv1i64.i64( , + , i64, i32); @@ -1824,6 +1905,7 @@ define @intrinsic_vmulhsu_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vmulhsu.nxv1i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1864,6 +1946,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv2i64.i64( , + , i64, i32); @@ -1881,6 +1964,7 @@ define @intrinsic_vmulhsu_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vmulhsu.nxv2i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1921,6 +2005,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv4i64.i64( , + , i64, i32); @@ -1938,6 +2023,7 @@ define @intrinsic_vmulhsu_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vmulhsu.nxv4i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1978,6 +2064,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv8i64.i64( , + , i64, i32); @@ -1995,6 +2082,7 @@ define @intrinsic_vmulhsu_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vmulhsu.nxv8i64.i64( + undef, %0, i64 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv64.ll index 328c909..fd7f4a1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv64.ll @@ -4,6 +4,7 @@ declare @llvm.riscv.vmulhsu.nxv1i8.nxv1i8( , , + , i64); define @intrinsic_vmulhsu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { @@ -14,6 +15,7 @@ define @intrinsic_vmulhsu_vv_nxv1i8_nxv1i8_nxv1i8( 
@llvm.riscv.vmulhsu.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -49,6 +51,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv2i8.nxv2i8( , , + , i64); define @intrinsic_vmulhsu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { @@ -59,6 +62,7 @@ define @intrinsic_vmulhsu_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vmulhsu.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -94,6 +98,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv4i8.nxv4i8( , , + , i64); define @intrinsic_vmulhsu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { @@ -104,6 +109,7 @@ define @intrinsic_vmulhsu_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vmulhsu.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -139,6 +145,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv8i8.nxv8i8( , , + , i64); define @intrinsic_vmulhsu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { @@ -149,6 +156,7 @@ define @intrinsic_vmulhsu_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vmulhsu.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -184,6 +192,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv16i8.nxv16i8( , , + , i64); define @intrinsic_vmulhsu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { @@ -194,6 +203,7 @@ define @intrinsic_vmulhsu_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vmulhsu.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -229,6 +239,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv32i8.nxv32i8( , , + , i64); define @intrinsic_vmulhsu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { @@ -239,6 +250,7 @@ define @intrinsic_vmulhsu_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vmulhsu.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -274,6 +286,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv64i8.nxv64i8( , , + , i64); define @intrinsic_vmulhsu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { @@ -284,6 +297,7 @@ define @intrinsic_vmulhsu_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vmulhsu.nxv64i8.nxv64i8( + undef, %0, %1, i64 %2) @@ -320,6 +334,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv1i16.nxv1i16( , , + , i64); define @intrinsic_vmulhsu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { @@ -330,6 +345,7 @@ define @intrinsic_vmulhsu_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vmulhsu.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -365,6 +381,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv2i16.nxv2i16( , , + , i64); define @intrinsic_vmulhsu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { @@ -375,6 +392,7 @@ define @intrinsic_vmulhsu_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vmulhsu.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -410,6 +428,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv4i16.nxv4i16( , , + , i64); define @intrinsic_vmulhsu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { @@ -420,6 +439,7 @@ define @intrinsic_vmulhsu_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vmulhsu.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -455,6 +475,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv8i16.nxv8i16( , , + , i64); define @intrinsic_vmulhsu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { @@ -465,6 +486,7 @@ define @intrinsic_vmulhsu_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vmulhsu.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv16i16.nxv16i16( , , + , i64); define @intrinsic_vmulhsu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { @@ -510,6 +533,7 @@ define @intrinsic_vmulhsu_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vmulhsu.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv32i16.nxv32i16( , , + , i64); define @intrinsic_vmulhsu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { @@ -555,6 
+580,7 @@ define @intrinsic_vmulhsu_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vmulhsu.nxv32i16.nxv32i16( + undef, %0, %1, i64 %2) @@ -591,6 +617,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv1i32.nxv1i32( , , + , i64); define @intrinsic_vmulhsu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { @@ -601,6 +628,7 @@ define @intrinsic_vmulhsu_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vmulhsu.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -636,6 +664,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv2i32.nxv2i32( , , + , i64); define @intrinsic_vmulhsu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { @@ -646,6 +675,7 @@ define @intrinsic_vmulhsu_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vmulhsu.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv4i32.nxv4i32( , , + , i64); define @intrinsic_vmulhsu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { @@ -691,6 +722,7 @@ define @intrinsic_vmulhsu_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vmulhsu.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv8i32.nxv8i32( , , + , i64); define @intrinsic_vmulhsu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { @@ -736,6 +769,7 @@ define @intrinsic_vmulhsu_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vmulhsu.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv16i32.nxv16i32( , , + , i64); define @intrinsic_vmulhsu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { @@ -781,6 +816,7 @@ define @intrinsic_vmulhsu_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vmulhsu.nxv16i32.nxv16i32( + undef, %0, %1, i64 %2) @@ -817,6 +853,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv1i64.nxv1i64( , , + , i64); define @intrinsic_vmulhsu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { @@ -827,6 +864,7 @@ define @intrinsic_vmulhsu_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vmulhsu.nxv1i64.nxv1i64( + undef, %0, %1, i64 %2) @@ -862,6 +900,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv2i64.nxv2i64( , , + , i64); define @intrinsic_vmulhsu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { @@ -872,6 +911,7 @@ define @intrinsic_vmulhsu_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vmulhsu.nxv2i64.nxv2i64( + undef, %0, %1, i64 %2) @@ -907,6 +947,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv4i64.nxv4i64( , , + , i64); define @intrinsic_vmulhsu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { @@ -917,6 +958,7 @@ define @intrinsic_vmulhsu_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vmulhsu.nxv4i64.nxv4i64( + undef, %0, %1, i64 %2) @@ -952,6 +994,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv8i64.nxv8i64( , , + , i64); define @intrinsic_vmulhsu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { @@ -962,6 +1005,7 @@ define @intrinsic_vmulhsu_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vmulhsu.nxv8i64.nxv8i64( + undef, %0, %1, i64 %2) @@ -997,6 +1041,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv1i8.i8( , + , i8, i64); @@ -1008,6 +1053,7 @@ define @intrinsic_vmulhsu_vx_nxv1i8_nxv1i8_i8( @llvm.riscv.vmulhsu.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1042,6 +1088,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv2i8.i8( , + , i8, i64); @@ -1053,6 +1100,7 @@ define @intrinsic_vmulhsu_vx_nxv2i8_nxv2i8_i8( @llvm.riscv.vmulhsu.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1087,6 +1135,7 @@ entry: declare @llvm.riscv.vmulhsu.nxv4i8.i8( , + , i8, i64); @@ -1098,6 +1147,7 @@ define @intrinsic_vmulhsu_vx_nxv4i8_nxv4i8_i8( @llvm.riscv.vmulhsu.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1132,6 +1182,7 @@ entry: declare 
 <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.i8(
   <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
   i8,
   i64);
 
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vmulhsu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.i8(
+    <vscale x 8 x i8> undef,
     <vscale x 8 x i8> %0,
     i8 %1,
     i64 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll
index 8c04d78..a20149f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll
@@ -4,6 +4,7 @@
 declare <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
   i32);
 
 define <vscale x 1 x i8> @intrinsic_vmulhu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vmulhu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 %2)
@@ -997,6 +1041,7 @@ entry:
 declare <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.i8(
   <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
   i8,
   i32);
 
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vmulhu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i8 %1,
     i32 %2)
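For reference, a minimal sketch (not part of the patch; the function name and values are hypothetical) of how a caller spells one of the updated unmasked intrinsics on RV32. Per this change, undef in the new first (passthru) slot requests tail-agnostic lowering, while a live vector there requests tail undisturbed.

declare <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i32);

define <vscale x 1 x i8> @sample_vmulhu_ta(<vscale x 1 x i8> %x, <vscale x 1 x i8> %y, i32 %vl) {
entry:
  ; undef passthru -> tail agnostic; pass a real vector here for tail undisturbed.
  %r = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %x,
    <vscale x 1 x i8> %y,
    i32 %vl)
  ret <vscale x 1 x i8> %r
}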
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv64.ll
index 48ce2b2..b3b7352 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv64.ll
@@ -4,6 +4,7 @@
 declare <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
   i64);
 
 define <vscale x 1 x i8> @intrinsic_vmulhu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vmulhu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i64 %2)
@@ -997,6 +1041,7 @@ entry:
 declare <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.i8(
   <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
   i8,
   i64);
 
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vmulhu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i8 %1,
     i64 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll
index 3dfadcc..9452abe 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i16> %0,
     <vscale x 1 x i8> %1,
     i32 %2)
@@ -686,6 +716,7 @@ entry:
 }
 
 declare <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16(
+  <vscale x 1 x i8>,
   <vscale x 1 x i16>,
   i32,
   i32);
@@ -698,6 +729,7 @@ define <vscale x 1 x i8> @intrinsic_vnclip_vx_nxv1i8_nxv1i16(<vscale x 1 x i16> %0, i32 %1, i32 %2) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i16> %0,
     i32 %1,
     i32 %2)
@@ -1377,6 +1437,7 @@ define <vscale x 1 x i8> @intrinsic_vnclip_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, i32 %1) nounwind {
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i16> %0,
     i32 9,
     i32 %1)
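An illustrative sketch (hypothetical function name, not taken from the patch) of the updated narrowing vnclip.wv form on RV32: the new passthru operand has the narrow result type, while the main source keeps the wide type.

declare <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  i32);

define <vscale x 1 x i8> @sample_vnclip_wv(<vscale x 1 x i16> %wide, <vscale x 1 x i8> %shift, i32 %vl) {
entry:
  ; operand order after this change: passthru (narrow), wide source, vector shift, vl.
  %r = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i16> %wide,
    <vscale x 1 x i8> %shift,
    i32 %vl)
  ret <vscale x 1 x i8> %r
}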
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll
index dc0f3f8..df8def1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i16> %0,
     <vscale x 1 x i8> %1,
     i64 %2)
@@ -686,6 +716,7 @@ entry:
 }
 
 declare <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16(
+  <vscale x 1 x i8>,
   <vscale x 1 x i16>,
   i64,
   i64);
@@ -698,6 +729,7 @@ define <vscale x 1 x i8> @intrinsic_vnclip_vx_nxv1i8_nxv1i16(<vscale x 1 x i16> %0, i64 %1, i64 %2) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i16> %0,
     i64 %1,
     i64 %2)
@@ -1377,6 +1437,7 @@ define <vscale x 1 x i8> @intrinsic_vnclip_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, i64 %1) nounwind {
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i16> %0,
     i64 9,
     i64 %1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll
index 1fb5c65..7d368f9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i16> %0,
     <vscale x 1 x i8> %1,
     i32 %2)
@@ -686,6 +716,7 @@ entry:
 }
 
 declare <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16(
+  <vscale x 1 x i8>,
   <vscale x 1 x i16>,
   i32,
   i32);
@@ -698,6 +729,7 @@ define <vscale x 1 x i8> @intrinsic_vnclipu_vx_nxv1i8_nxv1i16(<vscale x 1 x i16> %0, i32 %1, i32 %2) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i16> %0,
     i32 %1,
     i32 %2)
@@ -1377,6 +1437,7 @@ define <vscale x 1 x i8> @intrinsic_vnclipu_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, i32 %1) nounwind {
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i16> %0,
     i32 9,
     i32 %1)
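Likewise, a hypothetical caller (sketch only, names not from the patch) of the scalar-shift vnclipu form on RV32, showing the operand order after this change: passthru, wide source, XLEN shift amount, then vl.

declare <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16(
  <vscale x 1 x i8>,
  <vscale x 1 x i16>,
  i32,
  i32);

define <vscale x 1 x i8> @sample_vnclipu_vx(<vscale x 1 x i16> %wide, i32 %shift, i32 %vl) {
entry:
  ; undef passthru -> tail-agnostic lowering for the unmasked form.
  %r = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i16> %wide,
    i32 %shift,
    i32 %vl)
  ret <vscale x 1 x i8> %r
}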
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll
index 0fd55a7..4a41fe9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i16> %0,
     <vscale x 1 x i8> %1,
     i64 %2)
@@ -686,6 +716,7 @@ entry:
 }
 
 declare <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16(
+  <vscale x 1 x i8>,
   <vscale x 1 x i16>,
   i64,
   i64);
@@ -698,6 +729,7 @@ define <vscale x 1 x i8> @intrinsic_vnclipu_vx_nxv1i8_nxv1i16(<vscale x 1 x i16> %0, i64 %1, i64 %2) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i16> %0,
     i64 %1,
     i64 %2)
@@ -1377,6 +1437,7 @@ define <vscale x 1 x i8> @intrinsic_vnclipu_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, i64 %1) nounwind {
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i16> %0,
     i64 9,
     i64 %1)
entry: } declare @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16( + , , , i32); @@ -378,6 +395,7 @@ define @intrinsic_vnsra_wv_nxv4i16_nxv4i32_nxv4i16( @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16( + undef, %0, %1, i32 %2) @@ -411,6 +429,7 @@ entry: } declare @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16( + , , , i32); @@ -424,6 +443,7 @@ define @intrinsic_vnsra_wv_nxv8i16_nxv8i32_nxv8i16( @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16( + undef, %0, %1, i32 %2) @@ -457,6 +477,7 @@ entry: } declare @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16( + , , , i32); @@ -470,6 +491,7 @@ define @intrinsic_vnsra_wv_nxv16i16_nxv16i32_nxv16i16( @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16( + undef, %0, %1, i32 %2) @@ -503,6 +525,7 @@ entry: } declare @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32( + , , , i32); @@ -515,6 +538,7 @@ define @intrinsic_vnsra_wv_nxv1i32_nxv1i64_nxv1i32( @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32( + undef, %0, %1, i32 %2) @@ -548,6 +572,7 @@ entry: } declare @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32( + , , , i32); @@ -561,6 +586,7 @@ define @intrinsic_vnsra_wv_nxv2i32_nxv2i64_nxv2i32( @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32( + undef, %0, %1, i32 %2) @@ -594,6 +620,7 @@ entry: } declare @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32( + , , , i32); @@ -607,6 +634,7 @@ define @intrinsic_vnsra_wv_nxv4i32_nxv4i64_nxv4i32( @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32( + undef, %0, %1, i32 %2) @@ -640,6 +668,7 @@ entry: } declare @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32( + , , , i32); @@ -653,6 +682,7 @@ define @intrinsic_vnsra_wv_nxv8i32_nxv8i64_nxv8i32( @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32( + undef, %0, %1, i32 %2) @@ -686,6 +716,7 @@ entry: } declare @llvm.riscv.vnsra.nxv1i8.nxv1i16( + , , i32, i32); @@ -698,6 +729,7 @@ define @intrinsic_vnsra_vx_nxv1i8_nxv1i16( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv1i8.nxv1i16( + undef, %0, i32 %1, i32 %2) @@ -731,6 +763,7 @@ entry: } declare @llvm.riscv.vnsra.nxv2i8.nxv2i16( + , , i32, i32); @@ -743,6 +776,7 @@ define @intrinsic_vnsra_vx_nxv2i8_nxv2i16( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv2i8.nxv2i16( + undef, %0, i32 %1, i32 %2) @@ -776,6 +810,7 @@ entry: } declare @llvm.riscv.vnsra.nxv4i8.nxv4i16( + , , i32, i32); @@ -788,6 +823,7 @@ define @intrinsic_vnsra_vx_nxv4i8_nxv4i16( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv4i8.nxv4i16( + undef, %0, i32 %1, i32 %2) @@ -821,6 +857,7 @@ entry: } declare @llvm.riscv.vnsra.nxv8i8.nxv8i16( + , , i32, i32); @@ -834,6 +871,7 @@ define @intrinsic_vnsra_vx_nxv8i8_nxv8i16( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i8.nxv8i16( + undef, %0, i32 %1, i32 %2) @@ -867,6 +905,7 @@ entry: } declare @llvm.riscv.vnsra.nxv16i8.nxv16i16( + , , i32, i32); @@ -880,6 +919,7 @@ define @intrinsic_vnsra_vx_nxv16i8_nxv16i16( @llvm.riscv.vnsra.nxv16i8.nxv16i16( + undef, %0, i32 %1, i32 %2) @@ -913,6 +953,7 @@ entry: } declare @llvm.riscv.vnsra.nxv32i8.nxv32i16( + , , i32, i32); @@ -926,6 +967,7 @@ define @intrinsic_vnsra_vx_nxv32i8_nxv32i16( @llvm.riscv.vnsra.nxv32i8.nxv32i16( + undef, %0, i32 %1, i32 %2) @@ -959,6 +1001,7 @@ entry: } declare @llvm.riscv.vnsra.nxv1i16.nxv1i32( + , , i32, i32); @@ -971,6 +1014,7 @@ define @intrinsic_vnsra_vx_nxv1i16_nxv1i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv1i16.nxv1i32( + undef, %0, i32 %1, i32 %2) @@ -1004,6 +1048,7 @@ entry: } declare @llvm.riscv.vnsra.nxv2i16.nxv2i32( + , , i32, i32); @@ -1016,6 +1061,7 @@ define @intrinsic_vnsra_vx_nxv2i16_nxv2i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv2i16.nxv2i32( + 
undef, %0, i32 %1, i32 %2) @@ -1049,6 +1095,7 @@ entry: } declare @llvm.riscv.vnsra.nxv4i16.nxv4i32( + , , i32, i32); @@ -1062,6 +1109,7 @@ define @intrinsic_vnsra_vx_nxv4i16_nxv4i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv4i16.nxv4i32( + undef, %0, i32 %1, i32 %2) @@ -1095,6 +1143,7 @@ entry: } declare @llvm.riscv.vnsra.nxv8i16.nxv8i32( + , , i32, i32); @@ -1108,6 +1157,7 @@ define @intrinsic_vnsra_vx_nxv8i16_nxv8i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i16.nxv8i32( + undef, %0, i32 %1, i32 %2) @@ -1141,6 +1191,7 @@ entry: } declare @llvm.riscv.vnsra.nxv16i16.nxv16i32( + , , i32, i32); @@ -1154,6 +1205,7 @@ define @intrinsic_vnsra_vx_nxv16i16_nxv16i32( @llvm.riscv.vnsra.nxv16i16.nxv16i32( + undef, %0, i32 %1, i32 %2) @@ -1187,6 +1239,7 @@ entry: } declare @llvm.riscv.vnsra.nxv1i32.nxv1i64( + , , i32, i32); @@ -1199,6 +1252,7 @@ define @intrinsic_vnsra_vx_nxv1i32_nxv1i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv1i32.nxv1i64( + undef, %0, i32 %1, i32 %2) @@ -1232,6 +1286,7 @@ entry: } declare @llvm.riscv.vnsra.nxv2i32.nxv2i64( + , , i32, i32); @@ -1245,6 +1300,7 @@ define @intrinsic_vnsra_vx_nxv2i32_nxv2i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv2i32.nxv2i64( + undef, %0, i32 %1, i32 %2) @@ -1278,6 +1334,7 @@ entry: } declare @llvm.riscv.vnsra.nxv4i32.nxv4i64( + , , i32, i32); @@ -1291,6 +1348,7 @@ define @intrinsic_vnsra_vx_nxv4i32_nxv4i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv4i32.nxv4i64( + undef, %0, i32 %1, i32 %2) @@ -1324,6 +1382,7 @@ entry: } declare @llvm.riscv.vnsra.nxv8i32.nxv8i64( + , , i32, i32); @@ -1337,6 +1396,7 @@ define @intrinsic_vnsra_vx_nxv8i32_nxv8i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i32.nxv8i64( + undef, %0, i32 %1, i32 %2) @@ -1377,6 +1437,7 @@ define @intrinsic_vnsra_vi_nxv1i8_nxv1i16_i8( @llvm.riscv.vnsra.nxv1i8.nxv1i16( + undef, %0, i32 9, i32 %1) @@ -1409,6 +1470,7 @@ define @intrinsic_vnsra_vi_nxv2i8_nxv2i16_i8( @llvm.riscv.vnsra.nxv2i8.nxv2i16( + undef, %0, i32 9, i32 %1) @@ -1441,6 +1503,7 @@ define @intrinsic_vnsra_vi_nxv4i8_nxv4i16_i8( @llvm.riscv.vnsra.nxv4i8.nxv4i16( + undef, %0, i32 9, i32 %1) @@ -1474,6 +1537,7 @@ define @intrinsic_vnsra_vi_nxv8i8_nxv8i16_i8( @llvm.riscv.vnsra.nxv8i8.nxv8i16( + undef, %0, i32 9, i32 %1) @@ -1507,6 +1571,7 @@ define @intrinsic_vnsra_vi_nxv16i8_nxv16i16_i8( @llvm.riscv.vnsra.nxv16i8.nxv16i16( + undef, %0, i32 9, i32 %1) @@ -1540,6 +1605,7 @@ define @intrinsic_vnsra_vi_nxv32i8_nxv32i16_i8( @llvm.riscv.vnsra.nxv32i8.nxv32i16( + undef, %0, i32 9, i32 %1) @@ -1572,6 +1638,7 @@ define @intrinsic_vnsra_vi_nxv1i16_nxv1i32_i16( @llvm.riscv.vnsra.nxv1i16.nxv1i32( + undef, %0, i32 9, i32 %1) @@ -1604,6 +1671,7 @@ define @intrinsic_vnsra_vi_nxv2i16_nxv2i32_i16( @llvm.riscv.vnsra.nxv2i16.nxv2i32( + undef, %0, i32 9, i32 %1) @@ -1637,6 +1705,7 @@ define @intrinsic_vnsra_vi_nxv4i16_nxv4i32_i16( @llvm.riscv.vnsra.nxv4i16.nxv4i32( + undef, %0, i32 9, i32 %1) @@ -1670,6 +1739,7 @@ define @intrinsic_vnsra_vi_nxv8i16_nxv8i32_i16( @llvm.riscv.vnsra.nxv8i16.nxv8i32( + undef, %0, i32 9, i32 %1) @@ -1703,6 +1773,7 @@ define @intrinsic_vnsra_vi_nxv16i16_nxv16i32_i16( @llvm.riscv.vnsra.nxv16i16.nxv16i32( + undef, %0, i32 9, i32 %1) @@ -1735,6 +1806,7 @@ define @intrinsic_vnsra_vi_nxv1i32_nxv1i64_i32( @llvm.riscv.vnsra.nxv1i32.nxv1i64( + undef, %0, i32 9, i32 %1) @@ -1768,6 +1840,7 @@ define @intrinsic_vnsra_vi_nxv2i32_nxv2i64_i32( @llvm.riscv.vnsra.nxv2i32.nxv2i64( + undef, %0, i32 9, i32 %1) @@ -1801,6 +1874,7 @@ define 
@intrinsic_vnsra_vi_nxv4i32_nxv4i64_i32( @llvm.riscv.vnsra.nxv4i32.nxv4i64( + undef, %0, i32 9, i32 %1) @@ -1834,6 +1908,7 @@ define @intrinsic_vnsra_vi_nxv8i32_nxv8i64_i32( @llvm.riscv.vnsra.nxv8i32.nxv8i64( + undef, %0, i32 9, i32 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll index b6ad650..c9686b0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8( + , , , i64); @@ -14,6 +15,7 @@ define @intrinsic_vnsra_wv_nxv1i8_nxv1i16_nxv1i8( @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8( + undef, %0, %1, i64 %2) @@ -47,6 +49,7 @@ entry: } declare @llvm.riscv.vnsra.nxv2i8.nxv2i16.nxv2i8( + , , , i64); @@ -59,6 +62,7 @@ define @intrinsic_vnsra_wv_nxv2i8_nxv2i16_nxv2i8( @llvm.riscv.vnsra.nxv2i8.nxv2i16.nxv2i8( + undef, %0, %1, i64 %2) @@ -92,6 +96,7 @@ entry: } declare @llvm.riscv.vnsra.nxv4i8.nxv4i16.nxv4i8( + , , , i64); @@ -104,6 +109,7 @@ define @intrinsic_vnsra_wv_nxv4i8_nxv4i16_nxv4i8( @llvm.riscv.vnsra.nxv4i8.nxv4i16.nxv4i8( + undef, %0, %1, i64 %2) @@ -137,6 +143,7 @@ entry: } declare @llvm.riscv.vnsra.nxv8i8.nxv8i16.nxv8i8( + , , , i64); @@ -150,6 +157,7 @@ define @intrinsic_vnsra_wv_nxv8i8_nxv8i16_nxv8i8( @llvm.riscv.vnsra.nxv8i8.nxv8i16.nxv8i8( + undef, %0, %1, i64 %2) @@ -183,6 +191,7 @@ entry: } declare @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8( + , , , i64); @@ -196,6 +205,7 @@ define @intrinsic_vnsra_wv_nxv16i8_nxv16i16_nxv16i8( @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8( + undef, %0, %1, i64 %2) @@ -229,6 +239,7 @@ entry: } declare @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8( + , , , i64); @@ -242,6 +253,7 @@ define @intrinsic_vnsra_wv_nxv32i8_nxv32i16_nxv32i8( @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8( + undef, %0, %1, i64 %2) @@ -275,6 +287,7 @@ entry: } declare @llvm.riscv.vnsra.nxv1i16.nxv1i32.nxv1i16( + , , , i64); @@ -287,6 +300,7 @@ define @intrinsic_vnsra_wv_nxv1i16_nxv1i32_nxv1i16( @llvm.riscv.vnsra.nxv1i16.nxv1i32.nxv1i16( + undef, %0, %1, i64 %2) @@ -320,6 +334,7 @@ entry: } declare @llvm.riscv.vnsra.nxv2i16.nxv2i32.nxv2i16( + , , , i64); @@ -332,6 +347,7 @@ define @intrinsic_vnsra_wv_nxv2i16_nxv2i32_nxv2i16( @llvm.riscv.vnsra.nxv2i16.nxv2i32.nxv2i16( + undef, %0, %1, i64 %2) @@ -365,6 +381,7 @@ entry: } declare @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16( + , , , i64); @@ -378,6 +395,7 @@ define @intrinsic_vnsra_wv_nxv4i16_nxv4i32_nxv4i16( @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16( + undef, %0, %1, i64 %2) @@ -411,6 +429,7 @@ entry: } declare @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16( + , , , i64); @@ -424,6 +443,7 @@ define @intrinsic_vnsra_wv_nxv8i16_nxv8i32_nxv8i16( @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16( + undef, %0, %1, i64 %2) @@ -457,6 +477,7 @@ entry: } declare @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16( + , , , i64); @@ -470,6 +491,7 @@ define @intrinsic_vnsra_wv_nxv16i16_nxv16i32_nxv16i16( @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16( + undef, %0, %1, i64 %2) @@ -503,6 +525,7 @@ entry: } declare @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32( + , , , i64); @@ -515,6 +538,7 @@ define @intrinsic_vnsra_wv_nxv1i32_nxv1i64_nxv1i32( @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32( + undef, %0, %1, i64 %2) @@ -548,6 +572,7 @@ entry: } declare @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32( + , , , i64); @@ -561,6 +586,7 @@ define @intrinsic_vnsra_wv_nxv2i32_nxv2i64_nxv2i32( @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32( + undef, %0, %1, 
i64 %2) @@ -594,6 +620,7 @@ entry: } declare @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32( + , , , i64); @@ -607,6 +634,7 @@ define @intrinsic_vnsra_wv_nxv4i32_nxv4i64_nxv4i32( @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32( + undef, %0, %1, i64 %2) @@ -640,6 +668,7 @@ entry: } declare @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32( + , , , i64); @@ -653,6 +682,7 @@ define @intrinsic_vnsra_wv_nxv8i32_nxv8i64_nxv8i32( @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32( + undef, %0, %1, i64 %2) @@ -686,6 +716,7 @@ entry: } declare @llvm.riscv.vnsra.nxv1i8.nxv1i16( + , , i64, i64); @@ -698,6 +729,7 @@ define @intrinsic_vnsra_vx_nxv1i8_nxv1i16( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv1i8.nxv1i16( + undef, %0, i64 %1, i64 %2) @@ -731,6 +763,7 @@ entry: } declare @llvm.riscv.vnsra.nxv2i8.nxv2i16( + , , i64, i64); @@ -743,6 +776,7 @@ define @intrinsic_vnsra_vx_nxv2i8_nxv2i16( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv2i8.nxv2i16( + undef, %0, i64 %1, i64 %2) @@ -776,6 +810,7 @@ entry: } declare @llvm.riscv.vnsra.nxv4i8.nxv4i16( + , , i64, i64); @@ -788,6 +823,7 @@ define @intrinsic_vnsra_vx_nxv4i8_nxv4i16( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv4i8.nxv4i16( + undef, %0, i64 %1, i64 %2) @@ -821,6 +857,7 @@ entry: } declare @llvm.riscv.vnsra.nxv8i8.nxv8i16( + , , i64, i64); @@ -834,6 +871,7 @@ define @intrinsic_vnsra_vx_nxv8i8_nxv8i16( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i8.nxv8i16( + undef, %0, i64 %1, i64 %2) @@ -867,6 +905,7 @@ entry: } declare @llvm.riscv.vnsra.nxv16i8.nxv16i16( + , , i64, i64); @@ -880,6 +919,7 @@ define @intrinsic_vnsra_vx_nxv16i8_nxv16i16( @llvm.riscv.vnsra.nxv16i8.nxv16i16( + undef, %0, i64 %1, i64 %2) @@ -913,6 +953,7 @@ entry: } declare @llvm.riscv.vnsra.nxv32i8.nxv32i16( + , , i64, i64); @@ -926,6 +967,7 @@ define @intrinsic_vnsra_vx_nxv32i8_nxv32i16( @llvm.riscv.vnsra.nxv32i8.nxv32i16( + undef, %0, i64 %1, i64 %2) @@ -959,6 +1001,7 @@ entry: } declare @llvm.riscv.vnsra.nxv1i16.nxv1i32( + , , i64, i64); @@ -971,6 +1014,7 @@ define @intrinsic_vnsra_vx_nxv1i16_nxv1i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv1i16.nxv1i32( + undef, %0, i64 %1, i64 %2) @@ -1004,6 +1048,7 @@ entry: } declare @llvm.riscv.vnsra.nxv2i16.nxv2i32( + , , i64, i64); @@ -1016,6 +1061,7 @@ define @intrinsic_vnsra_vx_nxv2i16_nxv2i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv2i16.nxv2i32( + undef, %0, i64 %1, i64 %2) @@ -1049,6 +1095,7 @@ entry: } declare @llvm.riscv.vnsra.nxv4i16.nxv4i32( + , , i64, i64); @@ -1062,6 +1109,7 @@ define @intrinsic_vnsra_vx_nxv4i16_nxv4i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv4i16.nxv4i32( + undef, %0, i64 %1, i64 %2) @@ -1095,6 +1143,7 @@ entry: } declare @llvm.riscv.vnsra.nxv8i16.nxv8i32( + , , i64, i64); @@ -1108,6 +1157,7 @@ define @intrinsic_vnsra_vx_nxv8i16_nxv8i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i16.nxv8i32( + undef, %0, i64 %1, i64 %2) @@ -1141,6 +1191,7 @@ entry: } declare @llvm.riscv.vnsra.nxv16i16.nxv16i32( + , , i64, i64); @@ -1154,6 +1205,7 @@ define @intrinsic_vnsra_vx_nxv16i16_nxv16i32( @llvm.riscv.vnsra.nxv16i16.nxv16i32( + undef, %0, i64 %1, i64 %2) @@ -1187,6 +1239,7 @@ entry: } declare @llvm.riscv.vnsra.nxv1i32.nxv1i64( + , , i64, i64); @@ -1199,6 +1252,7 @@ define @intrinsic_vnsra_vx_nxv1i32_nxv1i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv1i32.nxv1i64( + undef, %0, i64 %1, i64 %2) @@ -1232,6 +1286,7 @@ entry: } declare @llvm.riscv.vnsra.nxv2i32.nxv2i64( + , , i64, i64); @@ -1245,6 +1300,7 @@ define 
@intrinsic_vnsra_vx_nxv2i32_nxv2i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv2i32.nxv2i64( + undef, %0, i64 %1, i64 %2) @@ -1278,6 +1334,7 @@ entry: } declare @llvm.riscv.vnsra.nxv4i32.nxv4i64( + , , i64, i64); @@ -1291,6 +1348,7 @@ define @intrinsic_vnsra_vx_nxv4i32_nxv4i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv4i32.nxv4i64( + undef, %0, i64 %1, i64 %2) @@ -1324,6 +1382,7 @@ entry: } declare @llvm.riscv.vnsra.nxv8i32.nxv8i64( + , , i64, i64); @@ -1337,6 +1396,7 @@ define @intrinsic_vnsra_vx_nxv8i32_nxv8i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i32.nxv8i64( + undef, %0, i64 %1, i64 %2) @@ -1377,6 +1437,7 @@ define @intrinsic_vnsra_vi_nxv1i8_nxv1i16_i8( @llvm.riscv.vnsra.nxv1i8.nxv1i16( + undef, %0, i64 9, i64 %1) @@ -1409,6 +1470,7 @@ define @intrinsic_vnsra_vi_nxv2i8_nxv2i16_i8( @llvm.riscv.vnsra.nxv2i8.nxv2i16( + undef, %0, i64 9, i64 %1) @@ -1441,6 +1503,7 @@ define @intrinsic_vnsra_vi_nxv4i8_nxv4i16_i8( @llvm.riscv.vnsra.nxv4i8.nxv4i16( + undef, %0, i64 9, i64 %1) @@ -1474,6 +1537,7 @@ define @intrinsic_vnsra_vi_nxv8i8_nxv8i16_i8( @llvm.riscv.vnsra.nxv8i8.nxv8i16( + undef, %0, i64 9, i64 %1) @@ -1507,6 +1571,7 @@ define @intrinsic_vnsra_vi_nxv16i8_nxv16i16_i8( @llvm.riscv.vnsra.nxv16i8.nxv16i16( + undef, %0, i64 9, i64 %1) @@ -1540,6 +1605,7 @@ define @intrinsic_vnsra_vi_nxv32i8_nxv32i16_i8( @llvm.riscv.vnsra.nxv32i8.nxv32i16( + undef, %0, i64 9, i64 %1) @@ -1572,6 +1638,7 @@ define @intrinsic_vnsra_vi_nxv1i16_nxv1i32_i16( @llvm.riscv.vnsra.nxv1i16.nxv1i32( + undef, %0, i64 9, i64 %1) @@ -1604,6 +1671,7 @@ define @intrinsic_vnsra_vi_nxv2i16_nxv2i32_i16( @llvm.riscv.vnsra.nxv2i16.nxv2i32( + undef, %0, i64 9, i64 %1) @@ -1637,6 +1705,7 @@ define @intrinsic_vnsra_vi_nxv4i16_nxv4i32_i16( @llvm.riscv.vnsra.nxv4i16.nxv4i32( + undef, %0, i64 9, i64 %1) @@ -1670,6 +1739,7 @@ define @intrinsic_vnsra_vi_nxv8i16_nxv8i32_i16( @llvm.riscv.vnsra.nxv8i16.nxv8i32( + undef, %0, i64 9, i64 %1) @@ -1703,6 +1773,7 @@ define @intrinsic_vnsra_vi_nxv16i16_nxv16i32_i16( @llvm.riscv.vnsra.nxv16i16.nxv16i32( + undef, %0, i64 9, i64 %1) @@ -1735,6 +1806,7 @@ define @intrinsic_vnsra_vi_nxv1i32_nxv1i64_i32( @llvm.riscv.vnsra.nxv1i32.nxv1i64( + undef, %0, i64 9, i64 %1) @@ -1768,6 +1840,7 @@ define @intrinsic_vnsra_vi_nxv2i32_nxv2i64_i32( @llvm.riscv.vnsra.nxv2i32.nxv2i64( + undef, %0, i64 9, i64 %1) @@ -1801,6 +1874,7 @@ define @intrinsic_vnsra_vi_nxv4i32_nxv4i64_i32( @llvm.riscv.vnsra.nxv4i32.nxv4i64( + undef, %0, i64 9, i64 %1) @@ -1834,6 +1908,7 @@ define @intrinsic_vnsra_vi_nxv8i32_nxv8i64_i32( @llvm.riscv.vnsra.nxv8i32.nxv8i64( + undef, %0, i64 9, i64 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll index 065203a..1d83386 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8( + , , , i32); @@ -14,6 +15,7 @@ define @intrinsic_vnsrl_wv_nxv1i8_nxv1i16_nxv1i8( @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8( + undef, %0, %1, i32 %2) @@ -47,6 +49,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv2i8.nxv2i16.nxv2i8( + , , , i32); @@ -59,6 +62,7 @@ define @intrinsic_vnsrl_wv_nxv2i8_nxv2i16_nxv2i8( @llvm.riscv.vnsrl.nxv2i8.nxv2i16.nxv2i8( + undef, %0, %1, i32 %2) @@ -92,6 +96,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv4i8.nxv4i16.nxv4i8( + , , , i32); @@ -104,6 +109,7 @@ define @intrinsic_vnsrl_wv_nxv4i8_nxv4i16_nxv4i8( 
@llvm.riscv.vnsrl.nxv4i8.nxv4i16.nxv4i8( + undef, %0, %1, i32 %2) @@ -137,6 +143,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8( + , , , i32); @@ -150,6 +157,7 @@ define @intrinsic_vnsrl_wv_nxv8i8_nxv8i16_nxv8i8( @llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8( + undef, %0, %1, i32 %2) @@ -183,6 +191,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8( + , , , i32); @@ -196,6 +205,7 @@ define @intrinsic_vnsrl_wv_nxv16i8_nxv16i16_nxv16i8( @llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8( + undef, %0, %1, i32 %2) @@ -229,6 +239,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8( + , , , i32); @@ -242,6 +253,7 @@ define @intrinsic_vnsrl_wv_nxv32i8_nxv32i16_nxv32i8( @llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8( + undef, %0, %1, i32 %2) @@ -275,6 +287,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv1i16.nxv1i32.nxv1i16( + , , , i32); @@ -287,6 +300,7 @@ define @intrinsic_vnsrl_wv_nxv1i16_nxv1i32_nxv1i16( @llvm.riscv.vnsrl.nxv1i16.nxv1i32.nxv1i16( + undef, %0, %1, i32 %2) @@ -320,6 +334,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv2i16.nxv2i32.nxv2i16( + , , , i32); @@ -332,6 +347,7 @@ define @intrinsic_vnsrl_wv_nxv2i16_nxv2i32_nxv2i16( @llvm.riscv.vnsrl.nxv2i16.nxv2i32.nxv2i16( + undef, %0, %1, i32 %2) @@ -365,6 +381,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16( + , , , i32); @@ -378,6 +395,7 @@ define @intrinsic_vnsrl_wv_nxv4i16_nxv4i32_nxv4i16( @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16( + undef, %0, %1, i32 %2) @@ -411,6 +429,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16( + , , , i32); @@ -424,6 +443,7 @@ define @intrinsic_vnsrl_wv_nxv8i16_nxv8i32_nxv8i16( @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16( + undef, %0, %1, i32 %2) @@ -457,6 +477,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16( + , , , i32); @@ -470,6 +491,7 @@ define @intrinsic_vnsrl_wv_nxv16i16_nxv16i32_nxv16i16( @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16( + undef, %0, %1, i32 %2) @@ -503,6 +525,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32( + , , , i32); @@ -515,6 +538,7 @@ define @intrinsic_vnsrl_wv_nxv1i32_nxv1i64_nxv1i32( @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32( + undef, %0, %1, i32 %2) @@ -548,6 +572,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32( + , , , i32); @@ -561,6 +586,7 @@ define @intrinsic_vnsrl_wv_nxv2i32_nxv2i64_nxv2i32( @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32( + undef, %0, %1, i32 %2) @@ -594,6 +620,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32( + , , , i32); @@ -607,6 +634,7 @@ define @intrinsic_vnsrl_wv_nxv4i32_nxv4i64_nxv4i32( @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32( + undef, %0, %1, i32 %2) @@ -640,6 +668,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32( + , , , i32); @@ -653,6 +682,7 @@ define @intrinsic_vnsrl_wv_nxv8i32_nxv8i64_nxv8i32( @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32( + undef, %0, %1, i32 %2) @@ -686,6 +716,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv1i8.nxv1i16( + , , i32, i32); @@ -698,6 +729,7 @@ define @intrinsic_vnsrl_vx_nxv1i8_nxv1i16( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16( + undef, %0, i32 %1, i32 %2) @@ -731,6 +763,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv2i8.nxv2i16( + , , i32, i32); @@ -743,6 +776,7 @@ define @intrinsic_vnsrl_vx_nxv2i8_nxv2i16( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16( + undef, %0, i32 %1, i32 %2) @@ -776,6 +810,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv4i8.nxv4i16( + , , i32, i32); @@ -788,6 +823,7 @@ define @intrinsic_vnsrl_vx_nxv4i8_nxv4i16( % ; CHECK-NEXT: 
ret entry: %a = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16( + undef, %0, i32 %1, i32 %2) @@ -821,6 +857,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv8i8.nxv8i16( + , , i32, i32); @@ -834,6 +871,7 @@ define @intrinsic_vnsrl_vx_nxv8i8_nxv8i16( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16( + undef, %0, i32 %1, i32 %2) @@ -867,6 +905,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv16i8.nxv16i16( + , , i32, i32); @@ -880,6 +919,7 @@ define @intrinsic_vnsrl_vx_nxv16i8_nxv16i16( @llvm.riscv.vnsrl.nxv16i8.nxv16i16( + undef, %0, i32 %1, i32 %2) @@ -913,6 +953,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv32i8.nxv32i16( + , , i32, i32); @@ -926,6 +967,7 @@ define @intrinsic_vnsrl_vx_nxv32i8_nxv32i16( @llvm.riscv.vnsrl.nxv32i8.nxv32i16( + undef, %0, i32 %1, i32 %2) @@ -959,6 +1001,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv1i16.nxv1i32( + , , i32, i32); @@ -971,6 +1014,7 @@ define @intrinsic_vnsrl_vx_nxv1i16_nxv1i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32( + undef, %0, i32 %1, i32 %2) @@ -1004,6 +1048,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv2i16.nxv2i32( + , , i32, i32); @@ -1016,6 +1061,7 @@ define @intrinsic_vnsrl_vx_nxv2i16_nxv2i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32( + undef, %0, i32 %1, i32 %2) @@ -1049,6 +1095,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv4i16.nxv4i32( + , , i32, i32); @@ -1062,6 +1109,7 @@ define @intrinsic_vnsrl_vx_nxv4i16_nxv4i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32( + undef, %0, i32 %1, i32 %2) @@ -1095,6 +1143,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv8i16.nxv8i32( + , , i32, i32); @@ -1108,6 +1157,7 @@ define @intrinsic_vnsrl_vx_nxv8i16_nxv8i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32( + undef, %0, i32 %1, i32 %2) @@ -1141,6 +1191,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv16i16.nxv16i32( + , , i32, i32); @@ -1154,6 +1205,7 @@ define @intrinsic_vnsrl_vx_nxv16i16_nxv16i32( @llvm.riscv.vnsrl.nxv16i16.nxv16i32( + undef, %0, i32 %1, i32 %2) @@ -1187,6 +1239,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv1i32.nxv1i64( + , , i32, i32); @@ -1199,6 +1252,7 @@ define @intrinsic_vnsrl_vx_nxv1i32_nxv1i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64( + undef, %0, i32 %1, i32 %2) @@ -1232,6 +1286,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv2i32.nxv2i64( + , , i32, i32); @@ -1245,6 +1300,7 @@ define @intrinsic_vnsrl_vx_nxv2i32_nxv2i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64( + undef, %0, i32 %1, i32 %2) @@ -1278,6 +1334,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv4i32.nxv4i64( + , , i32, i32); @@ -1291,6 +1348,7 @@ define @intrinsic_vnsrl_vx_nxv4i32_nxv4i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64( + undef, %0, i32 %1, i32 %2) @@ -1324,6 +1382,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv8i32.nxv8i64( + , , i32, i32); @@ -1337,6 +1396,7 @@ define @intrinsic_vnsrl_vx_nxv8i32_nxv8i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64( + undef, %0, i32 %1, i32 %2) @@ -1377,6 +1437,7 @@ define @intrinsic_vnsrl_vi_nxv1i8_nxv1i16_i8( @llvm.riscv.vnsrl.nxv1i8.nxv1i16( + undef, %0, i32 9, i32 %1) @@ -1409,6 +1470,7 @@ define @intrinsic_vnsrl_vi_nxv2i8_nxv2i16_i8( @llvm.riscv.vnsrl.nxv2i8.nxv2i16( + undef, %0, i32 9, i32 %1) @@ -1441,6 +1503,7 @@ define @intrinsic_vnsrl_vi_nxv4i8_nxv4i16_i8( @llvm.riscv.vnsrl.nxv4i8.nxv4i16( + undef, %0, i32 9, i32 %1) @@ -1474,6 +1537,7 @@ define @intrinsic_vnsrl_vi_nxv8i8_nxv8i16_i8( @llvm.riscv.vnsrl.nxv8i8.nxv8i16( + undef, %0, 
i32 9, i32 %1) @@ -1507,6 +1571,7 @@ define @intrinsic_vnsrl_vi_nxv16i8_nxv16i16_i8( @llvm.riscv.vnsrl.nxv16i8.nxv16i16( + undef, %0, i32 9, i32 %1) @@ -1540,6 +1605,7 @@ define @intrinsic_vnsrl_vi_nxv32i8_nxv32i16_i8( @llvm.riscv.vnsrl.nxv32i8.nxv32i16( + undef, %0, i32 9, i32 %1) @@ -1572,6 +1638,7 @@ define @intrinsic_vnsrl_vi_nxv1i16_nxv1i32_i16( @llvm.riscv.vnsrl.nxv1i16.nxv1i32( + undef, %0, i32 9, i32 %1) @@ -1604,6 +1671,7 @@ define @intrinsic_vnsrl_vi_nxv2i16_nxv2i32_i16( @llvm.riscv.vnsrl.nxv2i16.nxv2i32( + undef, %0, i32 9, i32 %1) @@ -1637,6 +1705,7 @@ define @intrinsic_vnsrl_vi_nxv4i16_nxv4i32_i16( @llvm.riscv.vnsrl.nxv4i16.nxv4i32( + undef, %0, i32 9, i32 %1) @@ -1670,6 +1739,7 @@ define @intrinsic_vnsrl_vi_nxv8i16_nxv8i32_i16( @llvm.riscv.vnsrl.nxv8i16.nxv8i32( + undef, %0, i32 9, i32 %1) @@ -1703,6 +1773,7 @@ define @intrinsic_vnsrl_vi_nxv16i16_nxv16i32_i16( @llvm.riscv.vnsrl.nxv16i16.nxv16i32( + undef, %0, i32 9, i32 %1) @@ -1735,6 +1806,7 @@ define @intrinsic_vnsrl_vi_nxv1i32_nxv1i64_i32( @llvm.riscv.vnsrl.nxv1i32.nxv1i64( + undef, %0, i32 9, i32 %1) @@ -1768,6 +1840,7 @@ define @intrinsic_vnsrl_vi_nxv2i32_nxv2i64_i32( @llvm.riscv.vnsrl.nxv2i32.nxv2i64( + undef, %0, i32 9, i32 %1) @@ -1801,6 +1874,7 @@ define @intrinsic_vnsrl_vi_nxv4i32_nxv4i64_i32( @llvm.riscv.vnsrl.nxv4i32.nxv4i64( + undef, %0, i32 9, i32 %1) @@ -1834,6 +1908,7 @@ define @intrinsic_vnsrl_vi_nxv8i32_nxv8i64_i32( @llvm.riscv.vnsrl.nxv8i32.nxv8i64( + undef, %0, i32 9, i32 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll index 8f8a89a..942e9cc 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8( + , , , i64); @@ -14,6 +15,7 @@ define @intrinsic_vnsrl_wv_nxv1i8_nxv1i16_nxv1i8( @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8( + undef, %0, %1, i64 %2) @@ -47,6 +49,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv2i8.nxv2i16.nxv2i8( + , , , i64); @@ -59,6 +62,7 @@ define @intrinsic_vnsrl_wv_nxv2i8_nxv2i16_nxv2i8( @llvm.riscv.vnsrl.nxv2i8.nxv2i16.nxv2i8( + undef, %0, %1, i64 %2) @@ -92,6 +96,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv4i8.nxv4i16.nxv4i8( + , , , i64); @@ -104,6 +109,7 @@ define @intrinsic_vnsrl_wv_nxv4i8_nxv4i16_nxv4i8( @llvm.riscv.vnsrl.nxv4i8.nxv4i16.nxv4i8( + undef, %0, %1, i64 %2) @@ -137,6 +143,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8( + , , , i64); @@ -150,6 +157,7 @@ define @intrinsic_vnsrl_wv_nxv8i8_nxv8i16_nxv8i8( @llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8( + undef, %0, %1, i64 %2) @@ -183,6 +191,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8( + , , , i64); @@ -196,6 +205,7 @@ define @intrinsic_vnsrl_wv_nxv16i8_nxv16i16_nxv16i8( @llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8( + undef, %0, %1, i64 %2) @@ -229,6 +239,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8( + , , , i64); @@ -242,6 +253,7 @@ define @intrinsic_vnsrl_wv_nxv32i8_nxv32i16_nxv32i8( @llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8( + undef, %0, %1, i64 %2) @@ -275,6 +287,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv1i16.nxv1i32.nxv1i16( + , , , i64); @@ -287,6 +300,7 @@ define @intrinsic_vnsrl_wv_nxv1i16_nxv1i32_nxv1i16( @llvm.riscv.vnsrl.nxv1i16.nxv1i32.nxv1i16( + undef, %0, %1, i64 %2) @@ -320,6 +334,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv2i16.nxv2i32.nxv2i16( + , , , i64); @@ -332,6 +347,7 @@ define 
@intrinsic_vnsrl_wv_nxv2i16_nxv2i32_nxv2i16( @llvm.riscv.vnsrl.nxv2i16.nxv2i32.nxv2i16( + undef, %0, %1, i64 %2) @@ -365,6 +381,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16( + , , , i64); @@ -378,6 +395,7 @@ define @intrinsic_vnsrl_wv_nxv4i16_nxv4i32_nxv4i16( @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16( + undef, %0, %1, i64 %2) @@ -411,6 +429,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16( + , , , i64); @@ -424,6 +443,7 @@ define @intrinsic_vnsrl_wv_nxv8i16_nxv8i32_nxv8i16( @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16( + undef, %0, %1, i64 %2) @@ -457,6 +477,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16( + , , , i64); @@ -470,6 +491,7 @@ define @intrinsic_vnsrl_wv_nxv16i16_nxv16i32_nxv16i16( @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16( + undef, %0, %1, i64 %2) @@ -503,6 +525,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32( + , , , i64); @@ -515,6 +538,7 @@ define @intrinsic_vnsrl_wv_nxv1i32_nxv1i64_nxv1i32( @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32( + undef, %0, %1, i64 %2) @@ -548,6 +572,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32( + , , , i64); @@ -561,6 +586,7 @@ define @intrinsic_vnsrl_wv_nxv2i32_nxv2i64_nxv2i32( @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32( + undef, %0, %1, i64 %2) @@ -594,6 +620,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32( + , , , i64); @@ -607,6 +634,7 @@ define @intrinsic_vnsrl_wv_nxv4i32_nxv4i64_nxv4i32( @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32( + undef, %0, %1, i64 %2) @@ -640,6 +668,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32( + , , , i64); @@ -653,6 +682,7 @@ define @intrinsic_vnsrl_wv_nxv8i32_nxv8i64_nxv8i32( @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32( + undef, %0, %1, i64 %2) @@ -686,6 +716,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv1i8.nxv1i16( + , , i64, i64); @@ -698,6 +729,7 @@ define @intrinsic_vnsrl_vx_nxv1i8_nxv1i16( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16( + undef, %0, i64 %1, i64 %2) @@ -731,6 +763,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv2i8.nxv2i16( + , , i64, i64); @@ -743,6 +776,7 @@ define @intrinsic_vnsrl_vx_nxv2i8_nxv2i16( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16( + undef, %0, i64 %1, i64 %2) @@ -776,6 +810,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv4i8.nxv4i16( + , , i64, i64); @@ -788,6 +823,7 @@ define @intrinsic_vnsrl_vx_nxv4i8_nxv4i16( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16( + undef, %0, i64 %1, i64 %2) @@ -821,6 +857,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv8i8.nxv8i16( + , , i64, i64); @@ -834,6 +871,7 @@ define @intrinsic_vnsrl_vx_nxv8i8_nxv8i16( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16( + undef, %0, i64 %1, i64 %2) @@ -867,6 +905,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv16i8.nxv16i16( + , , i64, i64); @@ -880,6 +919,7 @@ define @intrinsic_vnsrl_vx_nxv16i8_nxv16i16( @llvm.riscv.vnsrl.nxv16i8.nxv16i16( + undef, %0, i64 %1, i64 %2) @@ -913,6 +953,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv32i8.nxv32i16( + , , i64, i64); @@ -926,6 +967,7 @@ define @intrinsic_vnsrl_vx_nxv32i8_nxv32i16( @llvm.riscv.vnsrl.nxv32i8.nxv32i16( + undef, %0, i64 %1, i64 %2) @@ -959,6 +1001,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv1i16.nxv1i32( + , , i64, i64); @@ -971,6 +1014,7 @@ define @intrinsic_vnsrl_vx_nxv1i16_nxv1i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32( + undef, %0, i64 %1, i64 %2) @@ -1004,6 +1048,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv2i16.nxv2i32( + , , i64, i64); @@ 
-1016,6 +1061,7 @@ define @intrinsic_vnsrl_vx_nxv2i16_nxv2i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32( + undef, %0, i64 %1, i64 %2) @@ -1049,6 +1095,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv4i16.nxv4i32( + , , i64, i64); @@ -1062,6 +1109,7 @@ define @intrinsic_vnsrl_vx_nxv4i16_nxv4i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32( + undef, %0, i64 %1, i64 %2) @@ -1095,6 +1143,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv8i16.nxv8i32( + , , i64, i64); @@ -1108,6 +1157,7 @@ define @intrinsic_vnsrl_vx_nxv8i16_nxv8i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32( + undef, %0, i64 %1, i64 %2) @@ -1141,6 +1191,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv16i16.nxv16i32( + , , i64, i64); @@ -1154,6 +1205,7 @@ define @intrinsic_vnsrl_vx_nxv16i16_nxv16i32( @llvm.riscv.vnsrl.nxv16i16.nxv16i32( + undef, %0, i64 %1, i64 %2) @@ -1187,6 +1239,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv1i32.nxv1i64( + , , i64, i64); @@ -1199,6 +1252,7 @@ define @intrinsic_vnsrl_vx_nxv1i32_nxv1i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64( + undef, %0, i64 %1, i64 %2) @@ -1232,6 +1286,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv2i32.nxv2i64( + , , i64, i64); @@ -1245,6 +1300,7 @@ define @intrinsic_vnsrl_vx_nxv2i32_nxv2i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64( + undef, %0, i64 %1, i64 %2) @@ -1278,6 +1334,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv4i32.nxv4i64( + , , i64, i64); @@ -1291,6 +1348,7 @@ define @intrinsic_vnsrl_vx_nxv4i32_nxv4i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64( + undef, %0, i64 %1, i64 %2) @@ -1324,6 +1382,7 @@ entry: } declare @llvm.riscv.vnsrl.nxv8i32.nxv8i64( + , , i64, i64); @@ -1337,6 +1396,7 @@ define @intrinsic_vnsrl_vx_nxv8i32_nxv8i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64( + undef, %0, i64 %1, i64 %2) @@ -1377,6 +1437,7 @@ define @intrinsic_vnsrl_vi_nxv1i8_nxv1i16_i8( @llvm.riscv.vnsrl.nxv1i8.nxv1i16( + undef, %0, i64 9, i64 %1) @@ -1409,6 +1470,7 @@ define @intrinsic_vnsrl_vi_nxv2i8_nxv2i16_i8( @llvm.riscv.vnsrl.nxv2i8.nxv2i16( + undef, %0, i64 9, i64 %1) @@ -1441,6 +1503,7 @@ define @intrinsic_vnsrl_vi_nxv4i8_nxv4i16_i8( @llvm.riscv.vnsrl.nxv4i8.nxv4i16( + undef, %0, i64 9, i64 %1) @@ -1474,6 +1537,7 @@ define @intrinsic_vnsrl_vi_nxv8i8_nxv8i16_i8( @llvm.riscv.vnsrl.nxv8i8.nxv8i16( + undef, %0, i64 9, i64 %1) @@ -1507,6 +1571,7 @@ define @intrinsic_vnsrl_vi_nxv16i8_nxv16i16_i8( @llvm.riscv.vnsrl.nxv16i8.nxv16i16( + undef, %0, i64 9, i64 %1) @@ -1540,6 +1605,7 @@ define @intrinsic_vnsrl_vi_nxv32i8_nxv32i16_i8( @llvm.riscv.vnsrl.nxv32i8.nxv32i16( + undef, %0, i64 9, i64 %1) @@ -1572,6 +1638,7 @@ define @intrinsic_vnsrl_vi_nxv1i16_nxv1i32_i16( @llvm.riscv.vnsrl.nxv1i16.nxv1i32( + undef, %0, i64 9, i64 %1) @@ -1604,6 +1671,7 @@ define @intrinsic_vnsrl_vi_nxv2i16_nxv2i32_i16( @llvm.riscv.vnsrl.nxv2i16.nxv2i32( + undef, %0, i64 9, i64 %1) @@ -1637,6 +1705,7 @@ define @intrinsic_vnsrl_vi_nxv4i16_nxv4i32_i16( @llvm.riscv.vnsrl.nxv4i16.nxv4i32( + undef, %0, i64 9, i64 %1) @@ -1670,6 +1739,7 @@ define @intrinsic_vnsrl_vi_nxv8i16_nxv8i32_i16( @llvm.riscv.vnsrl.nxv8i16.nxv8i32( + undef, %0, i64 9, i64 %1) @@ -1703,6 +1773,7 @@ define @intrinsic_vnsrl_vi_nxv16i16_nxv16i32_i16( @llvm.riscv.vnsrl.nxv16i16.nxv16i32( + undef, %0, i64 9, i64 %1) @@ -1735,6 +1806,7 @@ define @intrinsic_vnsrl_vi_nxv1i32_nxv1i64_i32( @llvm.riscv.vnsrl.nxv1i32.nxv1i64( + undef, %0, i64 9, i64 %1) @@ -1768,6 +1840,7 @@ define 
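The per-file churn above is mechanical, so a minimal self-contained sketch of what one updated unmasked test case looks like after the passthru operand is added may be easier to read than the individual hunks. The intrinsic name and types below follow the vor tests in this patch; the function name, RUN line, and comments are illustrative additions, not copied from any test file:

; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s -o /dev/null

; Unmasked vor.vx intrinsic with the new leading passthru operand; the
; unmasked tests in this patch pass undef for it.
declare <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8(
  <vscale x 1 x i8>,   ; passthru (undef in these tests)
  <vscale x 1 x i8>,   ; vector operand
  i8,                  ; scalar operand
  i64)                 ; vl

define <vscale x 1 x i8> @vor_vx_sketch(<vscale x 1 x i8> %v, i8 %s, i64 %vl) nounwind {
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %v,
    i8 %s,
    i64 %vl)
  ret <vscale x 1 x i8> %a
}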
diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll
index ab10d1c..3068585 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll
@@ -4,6 +4,7 @@
 declare <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
   i32);
 
 define <vscale x 1 x i8> @intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 %2)
[... identical passthru additions for the remaining vor vv, vx, and vi test cases in this file ...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll
index d30bf62..57be344 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll
@@ -4,6 +4,7 @@
 declare <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
   i64);
[... identical passthru additions for the remaining vor vv, vx, and vi test cases in this file ...]
@@ -2633,6 +2741,7 @@ define <vscale x 4 x i64> @intrinsic_vor_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64(
+    <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %0,
i64 9, i64 %1) @@ -2665,6 +2774,7 @@ define @intrinsic_vor_vi_nxv8i64_nxv8i64_i64( @llvm.riscv.vor.nxv8i64.i64( + undef, %0, i64 9, i64 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll index e9a24bc..54b8d88 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll @@ -4,6 +4,7 @@ declare @llvm.riscv.vrem.nxv1i8.nxv1i8( , , + , i32); define @intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { @@ -14,6 +15,7 @@ define @intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vrem.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -49,6 +51,7 @@ entry: declare @llvm.riscv.vrem.nxv2i8.nxv2i8( , , + , i32); define @intrinsic_vrem_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { @@ -59,6 +62,7 @@ define @intrinsic_vrem_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vrem.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -94,6 +98,7 @@ entry: declare @llvm.riscv.vrem.nxv4i8.nxv4i8( , , + , i32); define @intrinsic_vrem_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { @@ -104,6 +109,7 @@ define @intrinsic_vrem_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vrem.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -139,6 +145,7 @@ entry: declare @llvm.riscv.vrem.nxv8i8.nxv8i8( , , + , i32); define @intrinsic_vrem_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { @@ -149,6 +156,7 @@ define @intrinsic_vrem_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vrem.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -184,6 +192,7 @@ entry: declare @llvm.riscv.vrem.nxv16i8.nxv16i8( , , + , i32); define @intrinsic_vrem_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { @@ -194,6 +203,7 @@ define @intrinsic_vrem_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vrem.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -229,6 +239,7 @@ entry: declare @llvm.riscv.vrem.nxv32i8.nxv32i8( , , + , i32); define @intrinsic_vrem_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { @@ -239,6 +250,7 @@ define @intrinsic_vrem_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vrem.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -274,6 +286,7 @@ entry: declare @llvm.riscv.vrem.nxv64i8.nxv64i8( , , + , i32); define @intrinsic_vrem_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { @@ -284,6 +297,7 @@ define @intrinsic_vrem_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vrem.nxv64i8.nxv64i8( + undef, %0, %1, i32 %2) @@ -320,6 +334,7 @@ entry: declare @llvm.riscv.vrem.nxv1i16.nxv1i16( , , + , i32); define @intrinsic_vrem_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { @@ -330,6 +345,7 @@ define @intrinsic_vrem_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vrem.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -365,6 +381,7 @@ entry: declare @llvm.riscv.vrem.nxv2i16.nxv2i16( , , + , i32); define @intrinsic_vrem_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { @@ -375,6 +392,7 @@ define @intrinsic_vrem_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vrem.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -410,6 +428,7 @@ entry: declare @llvm.riscv.vrem.nxv4i16.nxv4i16( , , + , i32); define @intrinsic_vrem_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { @@ -420,6 +439,7 @@ define @intrinsic_vrem_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vrem.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -455,6 +475,7 @@ entry: declare @llvm.riscv.vrem.nxv8i16.nxv8i16( , , + , i32); define @intrinsic_vrem_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { @@ -465,6 +486,7 @@ define @intrinsic_vrem_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vrem.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -500,6 +522,7 @@ entry: declare 
@llvm.riscv.vrem.nxv16i16.nxv16i16( , , + , i32); define @intrinsic_vrem_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { @@ -510,6 +533,7 @@ define @intrinsic_vrem_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vrem.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vrem.nxv32i16.nxv32i16( , , + , i32); define @intrinsic_vrem_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { @@ -555,6 +580,7 @@ define @intrinsic_vrem_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vrem.nxv32i16.nxv32i16( + undef, %0, %1, i32 %2) @@ -591,6 +617,7 @@ entry: declare @llvm.riscv.vrem.nxv1i32.nxv1i32( , , + , i32); define @intrinsic_vrem_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { @@ -601,6 +628,7 @@ define @intrinsic_vrem_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vrem.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -636,6 +664,7 @@ entry: declare @llvm.riscv.vrem.nxv2i32.nxv2i32( , , + , i32); define @intrinsic_vrem_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { @@ -646,6 +675,7 @@ define @intrinsic_vrem_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vrem.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vrem.nxv4i32.nxv4i32( , , + , i32); define @intrinsic_vrem_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { @@ -691,6 +722,7 @@ define @intrinsic_vrem_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vrem.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vrem.nxv8i32.nxv8i32( , , + , i32); define @intrinsic_vrem_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { @@ -736,6 +769,7 @@ define @intrinsic_vrem_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vrem.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vrem.nxv16i32.nxv16i32( , , + , i32); define @intrinsic_vrem_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { @@ -781,6 +816,7 @@ define @intrinsic_vrem_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vrem.nxv16i32.nxv16i32( + undef, %0, %1, i32 %2) @@ -817,6 +853,7 @@ entry: declare @llvm.riscv.vrem.nxv1i64.nxv1i64( , , + , i32); define @intrinsic_vrem_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { @@ -827,6 +864,7 @@ define @intrinsic_vrem_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vrem.nxv1i64.nxv1i64( + undef, %0, %1, i32 %2) @@ -862,6 +900,7 @@ entry: declare @llvm.riscv.vrem.nxv2i64.nxv2i64( , , + , i32); define @intrinsic_vrem_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { @@ -872,6 +911,7 @@ define @intrinsic_vrem_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vrem.nxv2i64.nxv2i64( + undef, %0, %1, i32 %2) @@ -907,6 +947,7 @@ entry: declare @llvm.riscv.vrem.nxv4i64.nxv4i64( , , + , i32); define @intrinsic_vrem_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { @@ -917,6 +958,7 @@ define @intrinsic_vrem_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vrem.nxv4i64.nxv4i64( + undef, %0, %1, i32 %2) @@ -952,6 +994,7 @@ entry: declare @llvm.riscv.vrem.nxv8i64.nxv8i64( , , + , i32); define @intrinsic_vrem_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { @@ -962,6 +1005,7 @@ define @intrinsic_vrem_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vrem.nxv8i64.nxv8i64( + undef, %0, %1, i32 %2) @@ -997,6 +1041,7 @@ entry: declare @llvm.riscv.vrem.nxv1i8.i8( , + , i8, i32); @@ -1008,6 +1053,7 @@ define @intrinsic_vrem_vx_nxv1i8_nxv1i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrem.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1042,6 +1088,7 @@ entry: declare @llvm.riscv.vrem.nxv2i8.i8( , + , i8, i32); @@ -1053,6 +1100,7 @@ define 
@intrinsic_vrem_vx_nxv2i8_nxv2i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrem.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1087,6 +1135,7 @@ entry: declare @llvm.riscv.vrem.nxv4i8.i8( , + , i8, i32); @@ -1098,6 +1147,7 @@ define @intrinsic_vrem_vx_nxv4i8_nxv4i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrem.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1132,6 +1182,7 @@ entry: declare @llvm.riscv.vrem.nxv8i8.i8( , + , i8, i32); @@ -1143,6 +1194,7 @@ define @intrinsic_vrem_vx_nxv8i8_nxv8i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrem.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1177,6 +1229,7 @@ entry: declare @llvm.riscv.vrem.nxv16i8.i8( , + , i8, i32); @@ -1188,6 +1241,7 @@ define @intrinsic_vrem_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vrem.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1222,6 +1276,7 @@ entry: declare @llvm.riscv.vrem.nxv32i8.i8( , + , i8, i32); @@ -1233,6 +1288,7 @@ define @intrinsic_vrem_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vrem.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1267,6 +1323,7 @@ entry: declare @llvm.riscv.vrem.nxv64i8.i8( , + , i8, i32); @@ -1278,6 +1335,7 @@ define @intrinsic_vrem_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vrem.nxv64i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1312,6 +1370,7 @@ entry: declare @llvm.riscv.vrem.nxv1i16.i16( , + , i16, i32); @@ -1323,6 +1382,7 @@ define @intrinsic_vrem_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vrem.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1357,6 +1417,7 @@ entry: declare @llvm.riscv.vrem.nxv2i16.i16( , + , i16, i32); @@ -1368,6 +1429,7 @@ define @intrinsic_vrem_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vrem.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1402,6 +1464,7 @@ entry: declare @llvm.riscv.vrem.nxv4i16.i16( , + , i16, i32); @@ -1413,6 +1476,7 @@ define @intrinsic_vrem_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vrem.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1447,6 +1511,7 @@ entry: declare @llvm.riscv.vrem.nxv8i16.i16( , + , i16, i32); @@ -1458,6 +1523,7 @@ define @intrinsic_vrem_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vrem.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1492,6 +1558,7 @@ entry: declare @llvm.riscv.vrem.nxv16i16.i16( , + , i16, i32); @@ -1503,6 +1570,7 @@ define @intrinsic_vrem_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vrem.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1537,6 +1605,7 @@ entry: declare @llvm.riscv.vrem.nxv32i16.i16( , + , i16, i32); @@ -1548,6 +1617,7 @@ define @intrinsic_vrem_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vrem.nxv32i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1582,6 +1652,7 @@ entry: declare @llvm.riscv.vrem.nxv1i32.i32( , + , i32, i32); @@ -1593,6 +1664,7 @@ define @intrinsic_vrem_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vrem.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1627,6 +1699,7 @@ entry: declare @llvm.riscv.vrem.nxv2i32.i32( , + , i32, i32); @@ -1638,6 +1711,7 @@ define @intrinsic_vrem_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vrem.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1672,6 +1746,7 @@ entry: declare @llvm.riscv.vrem.nxv4i32.i32( , + , i32, i32); @@ -1683,6 +1758,7 @@ define @intrinsic_vrem_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vrem.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1717,6 +1793,7 @@ entry: declare @llvm.riscv.vrem.nxv8i32.i32( , + , i32, i32); @@ -1728,6 +1805,7 @@ define @intrinsic_vrem_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vrem.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1762,6 +1840,7 @@ entry: declare @llvm.riscv.vrem.nxv16i32.i32( , + , i32, i32); @@ -1773,6 +1852,7 @@ define @intrinsic_vrem_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vrem.nxv16i32.i32( + undef, 
%0, i32 %1, i32 %2) @@ -1807,6 +1887,7 @@ entry: declare @llvm.riscv.vrem.nxv1i64.i64( , + , i64, i32); @@ -1824,6 +1905,7 @@ define @intrinsic_vrem_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vrem.nxv1i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1864,6 +1946,7 @@ entry: declare @llvm.riscv.vrem.nxv2i64.i64( , + , i64, i32); @@ -1881,6 +1964,7 @@ define @intrinsic_vrem_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vrem.nxv2i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1921,6 +2005,7 @@ entry: declare @llvm.riscv.vrem.nxv4i64.i64( , + , i64, i32); @@ -1938,6 +2023,7 @@ define @intrinsic_vrem_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vrem.nxv4i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1978,6 +2064,7 @@ entry: declare @llvm.riscv.vrem.nxv8i64.i64( , + , i64, i32); @@ -1995,6 +2082,7 @@ define @intrinsic_vrem_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vrem.nxv8i64.i64( + undef, %0, i64 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll index 100b79b..4c89419 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll @@ -4,6 +4,7 @@ declare @llvm.riscv.vrem.nxv1i8.nxv1i8( , , + , i64); define @intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { @@ -14,6 +15,7 @@ define @intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vrem.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -49,6 +51,7 @@ entry: declare @llvm.riscv.vrem.nxv2i8.nxv2i8( , , + , i64); define @intrinsic_vrem_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { @@ -59,6 +62,7 @@ define @intrinsic_vrem_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vrem.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -94,6 +98,7 @@ entry: declare @llvm.riscv.vrem.nxv4i8.nxv4i8( , , + , i64); define @intrinsic_vrem_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { @@ -104,6 +109,7 @@ define @intrinsic_vrem_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vrem.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -139,6 +145,7 @@ entry: declare @llvm.riscv.vrem.nxv8i8.nxv8i8( , , + , i64); define @intrinsic_vrem_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { @@ -149,6 +156,7 @@ define @intrinsic_vrem_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vrem.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -184,6 +192,7 @@ entry: declare @llvm.riscv.vrem.nxv16i8.nxv16i8( , , + , i64); define @intrinsic_vrem_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { @@ -194,6 +203,7 @@ define @intrinsic_vrem_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vrem.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -229,6 +239,7 @@ entry: declare @llvm.riscv.vrem.nxv32i8.nxv32i8( , , + , i64); define @intrinsic_vrem_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { @@ -239,6 +250,7 @@ define @intrinsic_vrem_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vrem.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -274,6 +286,7 @@ entry: declare @llvm.riscv.vrem.nxv64i8.nxv64i8( , , + , i64); define @intrinsic_vrem_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { @@ -284,6 +297,7 @@ define @intrinsic_vrem_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vrem.nxv64i8.nxv64i8( + undef, %0, %1, i64 %2) @@ -320,6 +334,7 @@ entry: declare @llvm.riscv.vrem.nxv1i16.nxv1i16( , , + , i64); define @intrinsic_vrem_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { @@ -330,6 +345,7 @@ define @intrinsic_vrem_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vrem.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -365,6 +381,7 @@ entry: declare @llvm.riscv.vrem.nxv2i16.nxv2i16( , , + , i64); define @intrinsic_vrem_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { @@ -375,6 +392,7 @@ define 
@intrinsic_vrem_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vrem.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -410,6 +428,7 @@ entry: declare @llvm.riscv.vrem.nxv4i16.nxv4i16( , , + , i64); define @intrinsic_vrem_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { @@ -420,6 +439,7 @@ define @intrinsic_vrem_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vrem.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -455,6 +475,7 @@ entry: declare @llvm.riscv.vrem.nxv8i16.nxv8i16( , , + , i64); define @intrinsic_vrem_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { @@ -465,6 +486,7 @@ define @intrinsic_vrem_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vrem.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vrem.nxv16i16.nxv16i16( , , + , i64); define @intrinsic_vrem_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { @@ -510,6 +533,7 @@ define @intrinsic_vrem_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vrem.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vrem.nxv32i16.nxv32i16( , , + , i64); define @intrinsic_vrem_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { @@ -555,6 +580,7 @@ define @intrinsic_vrem_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vrem.nxv32i16.nxv32i16( + undef, %0, %1, i64 %2) @@ -591,6 +617,7 @@ entry: declare @llvm.riscv.vrem.nxv1i32.nxv1i32( , , + , i64); define @intrinsic_vrem_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { @@ -601,6 +628,7 @@ define @intrinsic_vrem_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vrem.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -636,6 +664,7 @@ entry: declare @llvm.riscv.vrem.nxv2i32.nxv2i32( , , + , i64); define @intrinsic_vrem_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { @@ -646,6 +675,7 @@ define @intrinsic_vrem_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vrem.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vrem.nxv4i32.nxv4i32( , , + , i64); define @intrinsic_vrem_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { @@ -691,6 +722,7 @@ define @intrinsic_vrem_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vrem.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vrem.nxv8i32.nxv8i32( , , + , i64); define @intrinsic_vrem_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { @@ -736,6 +769,7 @@ define @intrinsic_vrem_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vrem.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vrem.nxv16i32.nxv16i32( , , + , i64); define @intrinsic_vrem_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { @@ -781,6 +816,7 @@ define @intrinsic_vrem_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vrem.nxv16i32.nxv16i32( + undef, %0, %1, i64 %2) @@ -817,6 +853,7 @@ entry: declare @llvm.riscv.vrem.nxv1i64.nxv1i64( , , + , i64); define @intrinsic_vrem_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { @@ -827,6 +864,7 @@ define @intrinsic_vrem_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vrem.nxv1i64.nxv1i64( + undef, %0, %1, i64 %2) @@ -862,6 +900,7 @@ entry: declare @llvm.riscv.vrem.nxv2i64.nxv2i64( , , + , i64); define @intrinsic_vrem_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { @@ -872,6 +911,7 @@ define @intrinsic_vrem_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vrem.nxv2i64.nxv2i64( + undef, %0, %1, i64 %2) @@ -907,6 +947,7 @@ entry: declare @llvm.riscv.vrem.nxv4i64.nxv4i64( , , + , i64); define @intrinsic_vrem_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { @@ -917,6 +958,7 @@ define 
@intrinsic_vrem_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vrem.nxv4i64.nxv4i64( + undef, %0, %1, i64 %2) @@ -952,6 +994,7 @@ entry: declare @llvm.riscv.vrem.nxv8i64.nxv8i64( , , + , i64); define @intrinsic_vrem_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { @@ -962,6 +1005,7 @@ define @intrinsic_vrem_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vrem.nxv8i64.nxv8i64( + undef, %0, %1, i64 %2) @@ -997,6 +1041,7 @@ entry: declare @llvm.riscv.vrem.nxv1i8.i8( , + , i8, i64); @@ -1008,6 +1053,7 @@ define @intrinsic_vrem_vx_nxv1i8_nxv1i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrem.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1042,6 +1088,7 @@ entry: declare @llvm.riscv.vrem.nxv2i8.i8( , + , i8, i64); @@ -1053,6 +1100,7 @@ define @intrinsic_vrem_vx_nxv2i8_nxv2i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrem.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1087,6 +1135,7 @@ entry: declare @llvm.riscv.vrem.nxv4i8.i8( , + , i8, i64); @@ -1098,6 +1147,7 @@ define @intrinsic_vrem_vx_nxv4i8_nxv4i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrem.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1132,6 +1182,7 @@ entry: declare @llvm.riscv.vrem.nxv8i8.i8( , + , i8, i64); @@ -1143,6 +1194,7 @@ define @intrinsic_vrem_vx_nxv8i8_nxv8i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrem.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1177,6 +1229,7 @@ entry: declare @llvm.riscv.vrem.nxv16i8.i8( , + , i8, i64); @@ -1188,6 +1241,7 @@ define @intrinsic_vrem_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vrem.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1222,6 +1276,7 @@ entry: declare @llvm.riscv.vrem.nxv32i8.i8( , + , i8, i64); @@ -1233,6 +1288,7 @@ define @intrinsic_vrem_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vrem.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1267,6 +1323,7 @@ entry: declare @llvm.riscv.vrem.nxv64i8.i8( , + , i8, i64); @@ -1278,6 +1335,7 @@ define @intrinsic_vrem_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vrem.nxv64i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1312,6 +1370,7 @@ entry: declare @llvm.riscv.vrem.nxv1i16.i16( , + , i16, i64); @@ -1323,6 +1382,7 @@ define @intrinsic_vrem_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vrem.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1357,6 +1417,7 @@ entry: declare @llvm.riscv.vrem.nxv2i16.i16( , + , i16, i64); @@ -1368,6 +1429,7 @@ define @intrinsic_vrem_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vrem.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1402,6 +1464,7 @@ entry: declare @llvm.riscv.vrem.nxv4i16.i16( , + , i16, i64); @@ -1413,6 +1476,7 @@ define @intrinsic_vrem_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vrem.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1447,6 +1511,7 @@ entry: declare @llvm.riscv.vrem.nxv8i16.i16( , + , i16, i64); @@ -1458,6 +1523,7 @@ define @intrinsic_vrem_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vrem.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1492,6 +1558,7 @@ entry: declare @llvm.riscv.vrem.nxv16i16.i16( , + , i16, i64); @@ -1503,6 +1570,7 @@ define @intrinsic_vrem_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vrem.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1537,6 +1605,7 @@ entry: declare @llvm.riscv.vrem.nxv32i16.i16( , + , i16, i64); @@ -1548,6 +1617,7 @@ define @intrinsic_vrem_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vrem.nxv32i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1582,6 +1652,7 @@ entry: declare @llvm.riscv.vrem.nxv1i32.i32( , + , i32, i64); @@ -1593,6 +1664,7 @@ define @intrinsic_vrem_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vrem.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1627,6 +1699,7 @@ entry: declare @llvm.riscv.vrem.nxv2i32.i32( , + , i32, i64); @@ 
-1638,6 +1711,7 @@ define @intrinsic_vrem_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vrem.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1672,6 +1746,7 @@ entry: declare @llvm.riscv.vrem.nxv4i32.i32( , + , i32, i64); @@ -1683,6 +1758,7 @@ define @intrinsic_vrem_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vrem.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1717,6 +1793,7 @@ entry: declare @llvm.riscv.vrem.nxv8i32.i32( , + , i32, i64); @@ -1728,6 +1805,7 @@ define @intrinsic_vrem_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vrem.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1762,6 +1840,7 @@ entry: declare @llvm.riscv.vrem.nxv16i32.i32( , + , i32, i64); @@ -1773,6 +1852,7 @@ define @intrinsic_vrem_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vrem.nxv16i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1807,6 +1887,7 @@ entry: declare @llvm.riscv.vrem.nxv1i64.i64( , + , i64, i64); @@ -1818,6 +1899,7 @@ define @intrinsic_vrem_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vrem.nxv1i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1852,6 +1934,7 @@ entry: declare @llvm.riscv.vrem.nxv2i64.i64( , + , i64, i64); @@ -1863,6 +1946,7 @@ define @intrinsic_vrem_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vrem.nxv2i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1897,6 +1981,7 @@ entry: declare @llvm.riscv.vrem.nxv4i64.i64( , + , i64, i64); @@ -1908,6 +1993,7 @@ define @intrinsic_vrem_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vrem.nxv4i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1942,6 +2028,7 @@ entry: declare @llvm.riscv.vrem.nxv8i64.i64( , + , i64, i64); @@ -1953,6 +2040,7 @@ define @intrinsic_vrem_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vrem.nxv8i64.i64( + undef, %0, i64 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll index 6b3cf9d..1e5cdb8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll @@ -4,6 +4,7 @@ declare @llvm.riscv.vremu.nxv1i8.nxv1i8( , , + , i32); define @intrinsic_vremu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { @@ -14,6 +15,7 @@ define @intrinsic_vremu_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vremu.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -49,6 +51,7 @@ entry: declare @llvm.riscv.vremu.nxv2i8.nxv2i8( , , + , i32); define @intrinsic_vremu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { @@ -59,6 +62,7 @@ define @intrinsic_vremu_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vremu.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -94,6 +98,7 @@ entry: declare @llvm.riscv.vremu.nxv4i8.nxv4i8( , , + , i32); define @intrinsic_vremu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { @@ -104,6 +109,7 @@ define @intrinsic_vremu_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vremu.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -139,6 +145,7 @@ entry: declare @llvm.riscv.vremu.nxv8i8.nxv8i8( , , + , i32); define @intrinsic_vremu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { @@ -149,6 +156,7 @@ define @intrinsic_vremu_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vremu.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -184,6 +192,7 @@ entry: declare @llvm.riscv.vremu.nxv16i8.nxv16i8( , , + , i32); define @intrinsic_vremu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { @@ -194,6 +203,7 @@ define @intrinsic_vremu_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vremu.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -229,6 +239,7 @@ entry: declare @llvm.riscv.vremu.nxv32i8.nxv32i8( , , + , i32); define @intrinsic_vremu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { @@ -239,6 +250,7 @@ define @intrinsic_vremu_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vremu.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ 
-274,6 +286,7 @@ entry: declare @llvm.riscv.vremu.nxv64i8.nxv64i8( , , + , i32); define @intrinsic_vremu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { @@ -284,6 +297,7 @@ define @intrinsic_vremu_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vremu.nxv64i8.nxv64i8( + undef, %0, %1, i32 %2) @@ -320,6 +334,7 @@ entry: declare @llvm.riscv.vremu.nxv1i16.nxv1i16( , , + , i32); define @intrinsic_vremu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { @@ -330,6 +345,7 @@ define @intrinsic_vremu_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vremu.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -365,6 +381,7 @@ entry: declare @llvm.riscv.vremu.nxv2i16.nxv2i16( , , + , i32); define @intrinsic_vremu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { @@ -375,6 +392,7 @@ define @intrinsic_vremu_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vremu.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -410,6 +428,7 @@ entry: declare @llvm.riscv.vremu.nxv4i16.nxv4i16( , , + , i32); define @intrinsic_vremu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { @@ -420,6 +439,7 @@ define @intrinsic_vremu_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vremu.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -455,6 +475,7 @@ entry: declare @llvm.riscv.vremu.nxv8i16.nxv8i16( , , + , i32); define @intrinsic_vremu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { @@ -465,6 +486,7 @@ define @intrinsic_vremu_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vremu.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vremu.nxv16i16.nxv16i16( , , + , i32); define @intrinsic_vremu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { @@ -510,6 +533,7 @@ define @intrinsic_vremu_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vremu.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vremu.nxv32i16.nxv32i16( , , + , i32); define @intrinsic_vremu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { @@ -555,6 +580,7 @@ define @intrinsic_vremu_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vremu.nxv32i16.nxv32i16( + undef, %0, %1, i32 %2) @@ -591,6 +617,7 @@ entry: declare @llvm.riscv.vremu.nxv1i32.nxv1i32( , , + , i32); define @intrinsic_vremu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { @@ -601,6 +628,7 @@ define @intrinsic_vremu_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vremu.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -636,6 +664,7 @@ entry: declare @llvm.riscv.vremu.nxv2i32.nxv2i32( , , + , i32); define @intrinsic_vremu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { @@ -646,6 +675,7 @@ define @intrinsic_vremu_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vremu.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vremu.nxv4i32.nxv4i32( , , + , i32); define @intrinsic_vremu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { @@ -691,6 +722,7 @@ define @intrinsic_vremu_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vremu.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vremu.nxv8i32.nxv8i32( , , + , i32); define @intrinsic_vremu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { @@ -736,6 +769,7 @@ define @intrinsic_vremu_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vremu.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vremu.nxv16i32.nxv16i32( , , + , i32); define @intrinsic_vremu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { @@ -781,6 +816,7 @@ define @intrinsic_vremu_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vremu.nxv16i32.nxv16i32( + undef, %0, 
%1, i32 %2) @@ -817,6 +853,7 @@ entry: declare @llvm.riscv.vremu.nxv1i64.nxv1i64( , , + , i32); define @intrinsic_vremu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { @@ -827,6 +864,7 @@ define @intrinsic_vremu_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vremu.nxv1i64.nxv1i64( + undef, %0, %1, i32 %2) @@ -862,6 +900,7 @@ entry: declare @llvm.riscv.vremu.nxv2i64.nxv2i64( , , + , i32); define @intrinsic_vremu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { @@ -872,6 +911,7 @@ define @intrinsic_vremu_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vremu.nxv2i64.nxv2i64( + undef, %0, %1, i32 %2) @@ -907,6 +947,7 @@ entry: declare @llvm.riscv.vremu.nxv4i64.nxv4i64( , , + , i32); define @intrinsic_vremu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { @@ -917,6 +958,7 @@ define @intrinsic_vremu_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vremu.nxv4i64.nxv4i64( + undef, %0, %1, i32 %2) @@ -952,6 +994,7 @@ entry: declare @llvm.riscv.vremu.nxv8i64.nxv8i64( , , + , i32); define @intrinsic_vremu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { @@ -962,6 +1005,7 @@ define @intrinsic_vremu_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vremu.nxv8i64.nxv8i64( + undef, %0, %1, i32 %2) @@ -997,6 +1041,7 @@ entry: declare @llvm.riscv.vremu.nxv1i8.i8( , + , i8, i32); @@ -1008,6 +1053,7 @@ define @intrinsic_vremu_vx_nxv1i8_nxv1i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1042,6 +1088,7 @@ entry: declare @llvm.riscv.vremu.nxv2i8.i8( , + , i8, i32); @@ -1053,6 +1100,7 @@ define @intrinsic_vremu_vx_nxv2i8_nxv2i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1087,6 +1135,7 @@ entry: declare @llvm.riscv.vremu.nxv4i8.i8( , + , i8, i32); @@ -1098,6 +1147,7 @@ define @intrinsic_vremu_vx_nxv4i8_nxv4i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1132,6 +1182,7 @@ entry: declare @llvm.riscv.vremu.nxv8i8.i8( , + , i8, i32); @@ -1143,6 +1194,7 @@ define @intrinsic_vremu_vx_nxv8i8_nxv8i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1177,6 +1229,7 @@ entry: declare @llvm.riscv.vremu.nxv16i8.i8( , + , i8, i32); @@ -1188,6 +1241,7 @@ define @intrinsic_vremu_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vremu.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1222,6 +1276,7 @@ entry: declare @llvm.riscv.vremu.nxv32i8.i8( , + , i8, i32); @@ -1233,6 +1288,7 @@ define @intrinsic_vremu_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vremu.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1267,6 +1323,7 @@ entry: declare @llvm.riscv.vremu.nxv64i8.i8( , + , i8, i32); @@ -1278,6 +1335,7 @@ define @intrinsic_vremu_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vremu.nxv64i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1312,6 +1370,7 @@ entry: declare @llvm.riscv.vremu.nxv1i16.i16( , + , i16, i32); @@ -1323,6 +1382,7 @@ define @intrinsic_vremu_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vremu.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1357,6 +1417,7 @@ entry: declare @llvm.riscv.vremu.nxv2i16.i16( , + , i16, i32); @@ -1368,6 +1429,7 @@ define @intrinsic_vremu_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vremu.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1402,6 +1464,7 @@ entry: declare @llvm.riscv.vremu.nxv4i16.i16( , + , i16, i32); @@ -1413,6 +1476,7 @@ define @intrinsic_vremu_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vremu.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1447,6 +1511,7 @@ entry: declare @llvm.riscv.vremu.nxv8i16.i16( , + , i16, i32); @@ -1458,6 +1523,7 @@ define 
@intrinsic_vremu_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vremu.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1492,6 +1558,7 @@ entry: declare @llvm.riscv.vremu.nxv16i16.i16( , + , i16, i32); @@ -1503,6 +1570,7 @@ define @intrinsic_vremu_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vremu.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1537,6 +1605,7 @@ entry: declare @llvm.riscv.vremu.nxv32i16.i16( , + , i16, i32); @@ -1548,6 +1617,7 @@ define @intrinsic_vremu_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vremu.nxv32i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1582,6 +1652,7 @@ entry: declare @llvm.riscv.vremu.nxv1i32.i32( , + , i32, i32); @@ -1593,6 +1664,7 @@ define @intrinsic_vremu_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vremu.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1627,6 +1699,7 @@ entry: declare @llvm.riscv.vremu.nxv2i32.i32( , + , i32, i32); @@ -1638,6 +1711,7 @@ define @intrinsic_vremu_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vremu.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1672,6 +1746,7 @@ entry: declare @llvm.riscv.vremu.nxv4i32.i32( , + , i32, i32); @@ -1683,6 +1758,7 @@ define @intrinsic_vremu_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vremu.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1717,6 +1793,7 @@ entry: declare @llvm.riscv.vremu.nxv8i32.i32( , + , i32, i32); @@ -1728,6 +1805,7 @@ define @intrinsic_vremu_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vremu.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1762,6 +1840,7 @@ entry: declare @llvm.riscv.vremu.nxv16i32.i32( , + , i32, i32); @@ -1773,6 +1852,7 @@ define @intrinsic_vremu_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vremu.nxv16i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1807,6 +1887,7 @@ entry: declare @llvm.riscv.vremu.nxv1i64.i64( , + , i64, i32); @@ -1824,6 +1905,7 @@ define @intrinsic_vremu_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vremu.nxv1i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1864,6 +1946,7 @@ entry: declare @llvm.riscv.vremu.nxv2i64.i64( , + , i64, i32); @@ -1881,6 +1964,7 @@ define @intrinsic_vremu_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vremu.nxv2i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1921,6 +2005,7 @@ entry: declare @llvm.riscv.vremu.nxv4i64.i64( , + , i64, i32); @@ -1938,6 +2023,7 @@ define @intrinsic_vremu_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vremu.nxv4i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1978,6 +2064,7 @@ entry: declare @llvm.riscv.vremu.nxv8i64.i64( , + , i64, i32); @@ -1995,6 +2082,7 @@ define @intrinsic_vremu_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vremu.nxv8i64.i64( + undef, %0, i64 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll index 9b5c03b..543868c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll @@ -4,6 +4,7 @@ declare @llvm.riscv.vremu.nxv1i8.nxv1i8( , , + , i64); define @intrinsic_vremu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { @@ -14,6 +15,7 @@ define @intrinsic_vremu_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vremu.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -49,6 +51,7 @@ entry: declare @llvm.riscv.vremu.nxv2i8.nxv2i8( , , + , i64); define @intrinsic_vremu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { @@ -59,6 +62,7 @@ define @intrinsic_vremu_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vremu.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -94,6 +98,7 @@ entry: declare @llvm.riscv.vremu.nxv4i8.nxv4i8( , , + , i64); define @intrinsic_vremu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { @@ -104,6 +109,7 @@ define @intrinsic_vremu_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vremu.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -139,6 
+145,7 @@ entry: declare @llvm.riscv.vremu.nxv8i8.nxv8i8( , , + , i64); define @intrinsic_vremu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { @@ -149,6 +156,7 @@ define @intrinsic_vremu_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vremu.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -184,6 +192,7 @@ entry: declare @llvm.riscv.vremu.nxv16i8.nxv16i8( , , + , i64); define @intrinsic_vremu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { @@ -194,6 +203,7 @@ define @intrinsic_vremu_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vremu.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -229,6 +239,7 @@ entry: declare @llvm.riscv.vremu.nxv32i8.nxv32i8( , , + , i64); define @intrinsic_vremu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { @@ -239,6 +250,7 @@ define @intrinsic_vremu_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vremu.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -274,6 +286,7 @@ entry: declare @llvm.riscv.vremu.nxv64i8.nxv64i8( , , + , i64); define @intrinsic_vremu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { @@ -284,6 +297,7 @@ define @intrinsic_vremu_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vremu.nxv64i8.nxv64i8( + undef, %0, %1, i64 %2) @@ -320,6 +334,7 @@ entry: declare @llvm.riscv.vremu.nxv1i16.nxv1i16( , , + , i64); define @intrinsic_vremu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { @@ -330,6 +345,7 @@ define @intrinsic_vremu_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vremu.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -365,6 +381,7 @@ entry: declare @llvm.riscv.vremu.nxv2i16.nxv2i16( , , + , i64); define @intrinsic_vremu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { @@ -375,6 +392,7 @@ define @intrinsic_vremu_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vremu.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -410,6 +428,7 @@ entry: declare @llvm.riscv.vremu.nxv4i16.nxv4i16( , , + , i64); define @intrinsic_vremu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { @@ -420,6 +439,7 @@ define @intrinsic_vremu_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vremu.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -455,6 +475,7 @@ entry: declare @llvm.riscv.vremu.nxv8i16.nxv8i16( , , + , i64); define @intrinsic_vremu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { @@ -465,6 +486,7 @@ define @intrinsic_vremu_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vremu.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vremu.nxv16i16.nxv16i16( , , + , i64); define @intrinsic_vremu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { @@ -510,6 +533,7 @@ define @intrinsic_vremu_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vremu.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vremu.nxv32i16.nxv32i16( , , + , i64); define @intrinsic_vremu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { @@ -555,6 +580,7 @@ define @intrinsic_vremu_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vremu.nxv32i16.nxv32i16( + undef, %0, %1, i64 %2) @@ -591,6 +617,7 @@ entry: declare @llvm.riscv.vremu.nxv1i32.nxv1i32( , , + , i64); define @intrinsic_vremu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { @@ -601,6 +628,7 @@ define @intrinsic_vremu_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vremu.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -636,6 +664,7 @@ entry: declare @llvm.riscv.vremu.nxv2i32.nxv2i32( , , + , i64); define @intrinsic_vremu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { @@ -646,6 +675,7 @@ define @intrinsic_vremu_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vremu.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -681,6 
+711,7 @@ entry: declare @llvm.riscv.vremu.nxv4i32.nxv4i32( , , + , i64); define @intrinsic_vremu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { @@ -691,6 +722,7 @@ define @intrinsic_vremu_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vremu.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vremu.nxv8i32.nxv8i32( , , + , i64); define @intrinsic_vremu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { @@ -736,6 +769,7 @@ define @intrinsic_vremu_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vremu.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vremu.nxv16i32.nxv16i32( , , + , i64); define @intrinsic_vremu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { @@ -781,6 +816,7 @@ define @intrinsic_vremu_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vremu.nxv16i32.nxv16i32( + undef, %0, %1, i64 %2) @@ -817,6 +853,7 @@ entry: declare @llvm.riscv.vremu.nxv1i64.nxv1i64( , , + , i64); define @intrinsic_vremu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { @@ -827,6 +864,7 @@ define @intrinsic_vremu_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vremu.nxv1i64.nxv1i64( + undef, %0, %1, i64 %2) @@ -862,6 +900,7 @@ entry: declare @llvm.riscv.vremu.nxv2i64.nxv2i64( , , + , i64); define @intrinsic_vremu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { @@ -872,6 +911,7 @@ define @intrinsic_vremu_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vremu.nxv2i64.nxv2i64( + undef, %0, %1, i64 %2) @@ -907,6 +947,7 @@ entry: declare @llvm.riscv.vremu.nxv4i64.nxv4i64( , , + , i64); define @intrinsic_vremu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { @@ -917,6 +958,7 @@ define @intrinsic_vremu_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vremu.nxv4i64.nxv4i64( + undef, %0, %1, i64 %2) @@ -952,6 +994,7 @@ entry: declare @llvm.riscv.vremu.nxv8i64.nxv8i64( , , + , i64); define @intrinsic_vremu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { @@ -962,6 +1005,7 @@ define @intrinsic_vremu_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vremu.nxv8i64.nxv8i64( + undef, %0, %1, i64 %2) @@ -997,6 +1041,7 @@ entry: declare @llvm.riscv.vremu.nxv1i8.i8( , + , i8, i64); @@ -1008,6 +1053,7 @@ define @intrinsic_vremu_vx_nxv1i8_nxv1i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1042,6 +1088,7 @@ entry: declare @llvm.riscv.vremu.nxv2i8.i8( , + , i8, i64); @@ -1053,6 +1100,7 @@ define @intrinsic_vremu_vx_nxv2i8_nxv2i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1087,6 +1135,7 @@ entry: declare @llvm.riscv.vremu.nxv4i8.i8( , + , i8, i64); @@ -1098,6 +1147,7 @@ define @intrinsic_vremu_vx_nxv4i8_nxv4i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1132,6 +1182,7 @@ entry: declare @llvm.riscv.vremu.nxv8i8.i8( , + , i8, i64); @@ -1143,6 +1194,7 @@ define @intrinsic_vremu_vx_nxv8i8_nxv8i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1177,6 +1229,7 @@ entry: declare @llvm.riscv.vremu.nxv16i8.i8( , + , i8, i64); @@ -1188,6 +1241,7 @@ define @intrinsic_vremu_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vremu.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1222,6 +1276,7 @@ entry: declare @llvm.riscv.vremu.nxv32i8.i8( , + , i8, i64); @@ -1233,6 +1288,7 @@ define @intrinsic_vremu_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vremu.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1267,6 +1323,7 @@ entry: declare @llvm.riscv.vremu.nxv64i8.i8( , + , i8, i64); @@ -1278,6 +1335,7 @@ 
define @intrinsic_vremu_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vremu.nxv64i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1312,6 +1370,7 @@ entry: declare @llvm.riscv.vremu.nxv1i16.i16( , + , i16, i64); @@ -1323,6 +1382,7 @@ define @intrinsic_vremu_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vremu.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1357,6 +1417,7 @@ entry: declare @llvm.riscv.vremu.nxv2i16.i16( , + , i16, i64); @@ -1368,6 +1429,7 @@ define @intrinsic_vremu_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vremu.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1402,6 +1464,7 @@ entry: declare @llvm.riscv.vremu.nxv4i16.i16( , + , i16, i64); @@ -1413,6 +1476,7 @@ define @intrinsic_vremu_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vremu.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1447,6 +1511,7 @@ entry: declare @llvm.riscv.vremu.nxv8i16.i16( , + , i16, i64); @@ -1458,6 +1523,7 @@ define @intrinsic_vremu_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vremu.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1492,6 +1558,7 @@ entry: declare @llvm.riscv.vremu.nxv16i16.i16( , + , i16, i64); @@ -1503,6 +1570,7 @@ define @intrinsic_vremu_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vremu.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1537,6 +1605,7 @@ entry: declare @llvm.riscv.vremu.nxv32i16.i16( , + , i16, i64); @@ -1548,6 +1617,7 @@ define @intrinsic_vremu_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vremu.nxv32i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1582,6 +1652,7 @@ entry: declare @llvm.riscv.vremu.nxv1i32.i32( , + , i32, i64); @@ -1593,6 +1664,7 @@ define @intrinsic_vremu_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vremu.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1627,6 +1699,7 @@ entry: declare @llvm.riscv.vremu.nxv2i32.i32( , + , i32, i64); @@ -1638,6 +1711,7 @@ define @intrinsic_vremu_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vremu.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1672,6 +1746,7 @@ entry: declare @llvm.riscv.vremu.nxv4i32.i32( , + , i32, i64); @@ -1683,6 +1758,7 @@ define @intrinsic_vremu_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vremu.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1717,6 +1793,7 @@ entry: declare @llvm.riscv.vremu.nxv8i32.i32( , + , i32, i64); @@ -1728,6 +1805,7 @@ define @intrinsic_vremu_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vremu.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1762,6 +1840,7 @@ entry: declare @llvm.riscv.vremu.nxv16i32.i32( , + , i32, i64); @@ -1773,6 +1852,7 @@ define @intrinsic_vremu_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vremu.nxv16i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1807,6 +1887,7 @@ entry: declare @llvm.riscv.vremu.nxv1i64.i64( , + , i64, i64); @@ -1818,6 +1899,7 @@ define @intrinsic_vremu_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vremu.nxv1i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1852,6 +1934,7 @@ entry: declare @llvm.riscv.vremu.nxv2i64.i64( , + , i64, i64); @@ -1863,6 +1946,7 @@ define @intrinsic_vremu_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vremu.nxv2i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1897,6 +1981,7 @@ entry: declare @llvm.riscv.vremu.nxv4i64.i64( , + , i64, i64); @@ -1908,6 +1993,7 @@ define @intrinsic_vremu_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vremu.nxv4i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1942,6 +2028,7 @@ entry: declare @llvm.riscv.vremu.nxv8i64.i64( , + , i64, i64); @@ -1953,6 +2040,7 @@ define @intrinsic_vremu_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vremu.nxv8i64.i64( + undef, %0, i64 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll index 75e05a9..e529362 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll @@ -4,6 +4,7 @@ declare @llvm.riscv.vrgather.vv.nxv1i8.i32( , , + , i32); define @intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { @@ -15,6 +16,7 @@ define @intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vrgather.vv.nxv1i8.i32( + undef, %0, %1, i32 %2) @@ -50,6 +52,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv2i8.i32( , , + , i32); define @intrinsic_vrgather_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { @@ -61,6 +64,7 @@ define @intrinsic_vrgather_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vrgather.vv.nxv2i8.i32( + undef, %0, %1, i32 %2) @@ -96,6 +100,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv4i8.i32( , , + , i32); define @intrinsic_vrgather_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { @@ -107,6 +112,7 @@ define @intrinsic_vrgather_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vrgather.vv.nxv4i8.i32( + undef, %0, %1, i32 %2) @@ -142,6 +148,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv8i8.i32( , , + , i32); define @intrinsic_vrgather_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { @@ -153,6 +160,7 @@ define @intrinsic_vrgather_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vrgather.vv.nxv8i8.i32( + undef, %0, %1, i32 %2) @@ -188,6 +196,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv16i8.i32( , , + , i32); define @intrinsic_vrgather_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { @@ -199,6 +208,7 @@ define @intrinsic_vrgather_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vrgather.vv.nxv16i8.i32( + undef, %0, %1, i32 %2) @@ -234,6 +244,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv32i8.i32( , , + , i32); define @intrinsic_vrgather_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { @@ -245,6 +256,7 @@ define @intrinsic_vrgather_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vrgather.vv.nxv32i8.i32( + undef, %0, %1, i32 %2) @@ -280,6 +292,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv64i8.i32( , , + , i32); define @intrinsic_vrgather_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { @@ -291,6 +304,7 @@ define @intrinsic_vrgather_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vrgather.vv.nxv64i8.i32( + undef, %0, %1, i32 %2) @@ -327,6 +341,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv1i16.i32( , , + , i32); define @intrinsic_vrgather_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { @@ -338,6 +353,7 @@ define @intrinsic_vrgather_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vrgather.vv.nxv1i16.i32( + undef, %0, %1, i32 %2) @@ -373,6 +389,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv2i16.i32( , , + , i32); define @intrinsic_vrgather_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { @@ -384,6 +401,7 @@ define @intrinsic_vrgather_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vrgather.vv.nxv2i16.i32( + undef, %0, %1, i32 %2) @@ -419,6 +437,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv4i16.i32( , , + , i32); define @intrinsic_vrgather_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { @@ -430,6 +449,7 @@ define @intrinsic_vrgather_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vrgather.vv.nxv4i16.i32( + undef, %0, %1, i32 %2) @@ -465,6 +485,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv8i16.i32( , , + , i32); define @intrinsic_vrgather_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { @@ -476,6 +497,7 @@ define @intrinsic_vrgather_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vrgather.vv.nxv8i16.i32( + undef, %0, %1, i32 %2) @@ -511,6 +533,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv16i16.i32( , , + , i32); define @intrinsic_vrgather_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { @@ -522,6 +545,7 @@ define 
@intrinsic_vrgather_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vrgather.vv.nxv16i16.i32( + undef, %0, %1, i32 %2) @@ -557,6 +581,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv32i16.i32( , , + , i32); define @intrinsic_vrgather_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { @@ -568,6 +593,7 @@ define @intrinsic_vrgather_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vrgather.vv.nxv32i16.i32( + undef, %0, %1, i32 %2) @@ -604,6 +630,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv1i32.i32( , , + , i32); define @intrinsic_vrgather_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { @@ -615,6 +642,7 @@ define @intrinsic_vrgather_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vrgather.vv.nxv1i32.i32( + undef, %0, %1, i32 %2) @@ -650,6 +678,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv2i32.i32( , , + , i32); define @intrinsic_vrgather_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { @@ -661,6 +690,7 @@ define @intrinsic_vrgather_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vrgather.vv.nxv2i32.i32( + undef, %0, %1, i32 %2) @@ -696,6 +726,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv4i32.i32( , , + , i32); define @intrinsic_vrgather_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { @@ -707,6 +738,7 @@ define @intrinsic_vrgather_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vrgather.vv.nxv4i32.i32( + undef, %0, %1, i32 %2) @@ -742,6 +774,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv8i32.i32( , , + , i32); define @intrinsic_vrgather_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { @@ -753,6 +786,7 @@ define @intrinsic_vrgather_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vrgather.vv.nxv8i32.i32( + undef, %0, %1, i32 %2) @@ -788,6 +822,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv16i32.i32( , , + , i32); define @intrinsic_vrgather_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { @@ -799,6 +834,7 @@ define @intrinsic_vrgather_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vrgather.vv.nxv16i32.i32( + undef, %0, %1, i32 %2) @@ -834,6 +870,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv1f16.i32( , + , , i32); @@ -846,6 +883,7 @@ define @intrinsic_vrgather_vv_nxv1f16_nxv1f16_nxv1i16( @llvm.riscv.vrgather.vv.nxv1f16.i32( + undef, %0, %1, i32 %2) @@ -880,6 +918,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv2f16.i32( , + , , i32); @@ -892,6 +931,7 @@ define @intrinsic_vrgather_vv_nxv2f16_nxv2f16_nxv2i16( @llvm.riscv.vrgather.vv.nxv2f16.i32( + undef, %0, %1, i32 %2) @@ -926,6 +966,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv4f16.i32( , + , , i32); @@ -938,6 +979,7 @@ define @intrinsic_vrgather_vv_nxv4f16_nxv4f16_nxv4i16( @llvm.riscv.vrgather.vv.nxv4f16.i32( + undef, %0, %1, i32 %2) @@ -972,6 +1014,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv8f16.i32( , + , , i32); @@ -984,6 +1027,7 @@ define @intrinsic_vrgather_vv_nxv8f16_nxv8f16_nxv8i16( @llvm.riscv.vrgather.vv.nxv8f16.i32( + undef, %0, %1, i32 %2) @@ -1018,6 +1062,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv16f16.i32( , + , , i32); @@ -1030,6 +1075,7 @@ define @intrinsic_vrgather_vv_nxv16f16_nxv16f16_nxv16i16( @llvm.riscv.vrgather.vv.nxv16f16.i32( + undef, %0, %1, i32 %2) @@ -1064,6 +1110,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv32f16.i32( , + , , i32); @@ -1076,6 +1123,7 @@ define @intrinsic_vrgather_vv_nxv32f16_nxv32f16_nxv32i16( @llvm.riscv.vrgather.vv.nxv32f16.i32( + undef, %0, %1, i32 %2) @@ -1111,6 +1159,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv1f32.i32( , + , , i32); @@ -1123,6 +1172,7 @@ define @intrinsic_vrgather_vv_nxv1f32_nxv1f32_nxv1i32( @llvm.riscv.vrgather.vv.nxv1f32.i32( + undef, %0, %1, i32 %2) @@ 
-1157,6 +1207,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv2f32.i32( , + , , i32); @@ -1169,6 +1220,7 @@ define @intrinsic_vrgather_vv_nxv2f32_nxv2f32_nxv2i32( @llvm.riscv.vrgather.vv.nxv2f32.i32( + undef, %0, %1, i32 %2) @@ -1203,6 +1255,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv4f32.i32( , + , , i32); @@ -1215,6 +1268,7 @@ define @intrinsic_vrgather_vv_nxv4f32_nxv4f32_nxv4i32( @llvm.riscv.vrgather.vv.nxv4f32.i32( + undef, %0, %1, i32 %2) @@ -1249,6 +1303,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv8f32.i32( , + , , i32); @@ -1261,6 +1316,7 @@ define @intrinsic_vrgather_vv_nxv8f32_nxv8f32_nxv8i32( @llvm.riscv.vrgather.vv.nxv8f32.i32( + undef, %0, %1, i32 %2) @@ -1295,6 +1351,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv16f32.i32( , + , , i32); @@ -1307,6 +1364,7 @@ define @intrinsic_vrgather_vv_nxv16f32_nxv16f32_nxv16i32(< ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv16f32.i32( + undef, %0, %1, i32 %2) @@ -1342,6 +1400,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv1f64.i32( , + , , i32); @@ -1354,6 +1413,7 @@ define @intrinsic_vrgather_vv_nxv1f64_nxv1f64_nxv1i64( @llvm.riscv.vrgather.vv.nxv1f64.i32( + undef, %0, %1, i32 %2) @@ -1388,6 +1448,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv2f64.i32( , + , , i32); @@ -1400,6 +1461,7 @@ define @intrinsic_vrgather_vv_nxv2f64_nxv2f64_nxv2i64( @llvm.riscv.vrgather.vv.nxv2f64.i32( + undef, %0, %1, i32 %2) @@ -1434,6 +1496,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv4f64.i32( , + , , i32); @@ -1446,6 +1509,7 @@ define @intrinsic_vrgather_vv_nxv4f64_nxv4f64_nxv4i64( @llvm.riscv.vrgather.vv.nxv4f64.i32( + undef, %0, %1, i32 %2) @@ -1480,6 +1544,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv8f64.i32( , + , , i32); @@ -1492,6 +1557,7 @@ define @intrinsic_vrgather_vv_nxv8f64_nxv8f64_nxv8i64( @llvm.riscv.vrgather.vv.nxv8f64.i32( + undef, %0, %1, i32 %2) @@ -1527,6 +1593,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv1i8.i32( , + , i32, i32); @@ -1539,6 +1606,7 @@ define @intrinsic_vrgather_vx_nxv1i8_nxv1i8_i32( @llvm.riscv.vrgather.vx.nxv1i8.i32( + undef, %0, i32 %1, i32 %2) @@ -1573,6 +1641,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv2i8.i32( , + , i32, i32); @@ -1585,6 +1654,7 @@ define @intrinsic_vrgather_vx_nxv2i8_nxv2i8_i32( @llvm.riscv.vrgather.vx.nxv2i8.i32( + undef, %0, i32 %1, i32 %2) @@ -1619,6 +1689,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv4i8.i32( , + , i32, i32); @@ -1631,6 +1702,7 @@ define @intrinsic_vrgather_vx_nxv4i8_nxv4i8_i32( @llvm.riscv.vrgather.vx.nxv4i8.i32( + undef, %0, i32 %1, i32 %2) @@ -1665,6 +1737,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv8i8.i32( , + , i32, i32); @@ -1677,6 +1750,7 @@ define @intrinsic_vrgather_vx_nxv8i8_nxv8i8_i32( @llvm.riscv.vrgather.vx.nxv8i8.i32( + undef, %0, i32 %1, i32 %2) @@ -1711,6 +1785,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv16i8.i32( , + , i32, i32); @@ -1723,6 +1798,7 @@ define @intrinsic_vrgather_vx_nxv16i8_nxv16i8_i32( @llvm.riscv.vrgather.vx.nxv16i8.i32( + undef, %0, i32 %1, i32 %2) @@ -1757,6 +1833,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv32i8.i32( , + , i32, i32); @@ -1769,6 +1846,7 @@ define @intrinsic_vrgather_vx_nxv32i8_nxv32i8_i32( @llvm.riscv.vrgather.vx.nxv32i8.i32( + undef, %0, i32 %1, i32 %2) @@ -1803,6 +1881,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv64i8.i32( , + , i32, i32); @@ -1815,6 +1894,7 @@ define @intrinsic_vrgather_vx_nxv64i8_nxv64i8_i32( @llvm.riscv.vrgather.vx.nxv64i8.i32( + undef, %0, i32 %1, i32 %2) @@ -1849,6 +1929,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv1i16.i32( , + , i32, i32); @@ -1861,6 
+1942,7 @@ define @intrinsic_vrgather_vx_nxv1i16_nxv1i16_i32( @llvm.riscv.vrgather.vx.nxv1i16.i32( + undef, %0, i32 %1, i32 %2) @@ -1895,6 +1977,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv2i16.i32( , + , i32, i32); @@ -1907,6 +1990,7 @@ define @intrinsic_vrgather_vx_nxv2i16_nxv2i16_i32( @llvm.riscv.vrgather.vx.nxv2i16.i32( + undef, %0, i32 %1, i32 %2) @@ -1941,6 +2025,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv4i16.i32( , + , i32, i32); @@ -1953,6 +2038,7 @@ define @intrinsic_vrgather_vx_nxv4i16_nxv4i16_i32( @llvm.riscv.vrgather.vx.nxv4i16.i32( + undef, %0, i32 %1, i32 %2) @@ -1987,6 +2073,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv8i16.i32( , + , i32, i32); @@ -1999,6 +2086,7 @@ define @intrinsic_vrgather_vx_nxv8i16_nxv8i16_i32( @llvm.riscv.vrgather.vx.nxv8i16.i32( + undef, %0, i32 %1, i32 %2) @@ -2033,6 +2121,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv16i16.i32( , + , i32, i32); @@ -2045,6 +2134,7 @@ define @intrinsic_vrgather_vx_nxv16i16_nxv16i16_i32( @llvm.riscv.vrgather.vx.nxv16i16.i32( + undef, %0, i32 %1, i32 %2) @@ -2079,6 +2169,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv32i16.i32( , + , i32, i32); @@ -2091,6 +2182,7 @@ define @intrinsic_vrgather_vx_nxv32i16_nxv32i16_i32( @llvm.riscv.vrgather.vx.nxv32i16.i32( + undef, %0, i32 %1, i32 %2) @@ -2125,6 +2217,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv1i32.i32( , + , i32, i32); @@ -2137,6 +2230,7 @@ define @intrinsic_vrgather_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vrgather.vx.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -2171,6 +2265,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv2i32.i32( , + , i32, i32); @@ -2183,6 +2278,7 @@ define @intrinsic_vrgather_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vrgather.vx.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -2217,6 +2313,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv4i32.i32( , + , i32, i32); @@ -2229,6 +2326,7 @@ define @intrinsic_vrgather_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vrgather.vx.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -2263,6 +2361,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv8i32.i32( , + , i32, i32); @@ -2275,6 +2374,7 @@ define @intrinsic_vrgather_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vrgather.vx.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) @@ -2309,6 +2409,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv16i32.i32( , + , i32, i32); @@ -2321,6 +2422,7 @@ define @intrinsic_vrgather_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vrgather.vx.nxv16i32.i32( + undef, %0, i32 %1, i32 %2) @@ -2355,6 +2457,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv1f16.i32( , + , i32, i32); @@ -2367,6 +2470,7 @@ define @intrinsic_vrgather_vx_nxv1f16_nxv1f16_i32( @llvm.riscv.vrgather.vx.nxv1f16.i32( + undef, %0, i32 %1, i32 %2) @@ -2401,6 +2505,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv2f16.i32( , + , i32, i32); @@ -2413,6 +2518,7 @@ define @intrinsic_vrgather_vx_nxv2f16_nxv2f16_i32( @llvm.riscv.vrgather.vx.nxv2f16.i32( + undef, %0, i32 %1, i32 %2) @@ -2447,6 +2553,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv4f16.i32( , + , i32, i32); @@ -2459,6 +2566,7 @@ define @intrinsic_vrgather_vx_nxv4f16_nxv4f16_i32( @llvm.riscv.vrgather.vx.nxv4f16.i32( + undef, %0, i32 %1, i32 %2) @@ -2493,6 +2601,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv8f16.i32( , + , i32, i32); @@ -2505,6 +2614,7 @@ define @intrinsic_vrgather_vx_nxv8f16_nxv8f16_i32( @llvm.riscv.vrgather.vx.nxv8f16.i32( + undef, %0, i32 %1, i32 %2) @@ -2539,6 +2649,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv16f16.i32( , + , i32, i32); @@ -2551,6 +2662,7 @@ define @intrinsic_vrgather_vx_nxv16f16_nxv16f16_i32( 
@llvm.riscv.vrgather.vx.nxv16f16.i32( + undef, %0, i32 %1, i32 %2) @@ -2585,6 +2697,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv32f16.i32( , + , i32, i32); @@ -2597,6 +2710,7 @@ define @intrinsic_vrgather_vx_nxv32f16_nxv32f16_i32( @llvm.riscv.vrgather.vx.nxv32f16.i32( + undef, %0, i32 %1, i32 %2) @@ -2631,6 +2745,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv1f32.i32( , + , i32, i32); @@ -2643,6 +2758,7 @@ define @intrinsic_vrgather_vx_nxv1f32_nxv1f32_i32( @llvm.riscv.vrgather.vx.nxv1f32.i32( + undef, %0, i32 %1, i32 %2) @@ -2677,6 +2793,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv2f32.i32( , + , i32, i32); @@ -2689,6 +2806,7 @@ define @intrinsic_vrgather_vx_nxv2f32_nxv2f32_i32( @llvm.riscv.vrgather.vx.nxv2f32.i32( + undef, %0, i32 %1, i32 %2) @@ -2723,6 +2841,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv4f32.i32( , + , i32, i32); @@ -2735,6 +2854,7 @@ define @intrinsic_vrgather_vx_nxv4f32_nxv4f32_i32( @llvm.riscv.vrgather.vx.nxv4f32.i32( + undef, %0, i32 %1, i32 %2) @@ -2769,6 +2889,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv8f32.i32( , + , i32, i32); @@ -2781,6 +2902,7 @@ define @intrinsic_vrgather_vx_nxv8f32_nxv8f32_i32( @llvm.riscv.vrgather.vx.nxv8f32.i32( + undef, %0, i32 %1, i32 %2) @@ -2815,6 +2937,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv16f32.i32( , + , i32, i32); @@ -2827,6 +2950,7 @@ define @intrinsic_vrgather_vx_nxv16f32_nxv16f32_i32( @llvm.riscv.vrgather.vx.nxv16f32.i32( + undef, %0, i32 %1, i32 %2) @@ -2861,6 +2985,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv1f64.i32( , + , i32, i32); @@ -2873,6 +2998,7 @@ define @intrinsic_vrgather_vx_nxv1f64_nxv1f64_i32( @llvm.riscv.vrgather.vx.nxv1f64.i32( + undef, %0, i32 %1, i32 %2) @@ -2907,6 +3033,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv2f64.i32( , + , i32, i32); @@ -2919,6 +3046,7 @@ define @intrinsic_vrgather_vx_nxv2f64_nxv2f64_i32( @llvm.riscv.vrgather.vx.nxv2f64.i32( + undef, %0, i32 %1, i32 %2) @@ -2953,6 +3081,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv4f64.i32( , + , i32, i32); @@ -2965,6 +3094,7 @@ define @intrinsic_vrgather_vx_nxv4f64_nxv4f64_i32( @llvm.riscv.vrgather.vx.nxv4f64.i32( + undef, %0, i32 %1, i32 %2) @@ -2999,6 +3129,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv8f64.i32( , + , i32, i32); @@ -3011,6 +3142,7 @@ define @intrinsic_vrgather_vx_nxv8f64_nxv8f64_i32( @llvm.riscv.vrgather.vx.nxv8f64.i32( + undef, %0, i32 %1, i32 %2) @@ -3052,6 +3184,7 @@ define @intrinsic_vrgather_vi_nxv1i8_nxv1i8_i32( @llvm.riscv.vrgather.vx.nxv1i8.i32( + undef, %0, i32 9, i32 %1) @@ -3085,6 +3218,7 @@ define @intrinsic_vrgather_vi_nxv2i8_nxv2i8_i32( @llvm.riscv.vrgather.vx.nxv2i8.i32( + undef, %0, i32 9, i32 %1) @@ -3118,6 +3252,7 @@ define @intrinsic_vrgather_vi_nxv4i8_nxv4i8_i32( @llvm.riscv.vrgather.vx.nxv4i8.i32( + undef, %0, i32 9, i32 %1) @@ -3151,6 +3286,7 @@ define @intrinsic_vrgather_vi_nxv8i8_nxv8i8_i32( @llvm.riscv.vrgather.vx.nxv8i8.i32( + undef, %0, i32 9, i32 %1) @@ -3184,6 +3320,7 @@ define @intrinsic_vrgather_vi_nxv16i8_nxv16i8_i32( @llvm.riscv.vrgather.vx.nxv16i8.i32( + undef, %0, i32 9, i32 %1) @@ -3217,6 +3354,7 @@ define @intrinsic_vrgather_vi_nxv32i8_nxv32i8_i32( @llvm.riscv.vrgather.vx.nxv32i8.i32( + undef, %0, i32 9, i32 %1) @@ -3250,6 +3388,7 @@ define @intrinsic_vrgather_vi_nxv64i8_nxv64i8_i32( @llvm.riscv.vrgather.vx.nxv64i8.i32( + undef, %0, i32 9, i32 %1) @@ -3283,6 +3422,7 @@ define @intrinsic_vrgather_vi_nxv1i16_nxv1i16_i32( @llvm.riscv.vrgather.vx.nxv1i16.i32( + undef, %0, i32 9, i32 %1) @@ -3316,6 +3456,7 @@ define @intrinsic_vrgather_vi_nxv2i16_nxv2i16_i32( 
@llvm.riscv.vrgather.vx.nxv2i16.i32( + undef, %0, i32 9, i32 %1) @@ -3349,6 +3490,7 @@ define @intrinsic_vrgather_vi_nxv4i16_nxv4i16_i32( @llvm.riscv.vrgather.vx.nxv4i16.i32( + undef, %0, i32 9, i32 %1) @@ -3382,6 +3524,7 @@ define @intrinsic_vrgather_vi_nxv8i16_nxv8i16_i32( @llvm.riscv.vrgather.vx.nxv8i16.i32( + undef, %0, i32 9, i32 %1) @@ -3415,6 +3558,7 @@ define @intrinsic_vrgather_vi_nxv16i16_nxv16i16_i32( @llvm.riscv.vrgather.vx.nxv16i16.i32( + undef, %0, i32 9, i32 %1) @@ -3448,6 +3592,7 @@ define @intrinsic_vrgather_vi_nxv32i16_nxv32i16_i32( @llvm.riscv.vrgather.vx.nxv32i16.i32( + undef, %0, i32 9, i32 %1) @@ -3481,6 +3626,7 @@ define @intrinsic_vrgather_vi_nxv1i32_nxv1i32_i32( @llvm.riscv.vrgather.vx.nxv1i32.i32( + undef, %0, i32 9, i32 %1) @@ -3514,6 +3660,7 @@ define @intrinsic_vrgather_vi_nxv2i32_nxv2i32_i32( @llvm.riscv.vrgather.vx.nxv2i32.i32( + undef, %0, i32 9, i32 %1) @@ -3547,6 +3694,7 @@ define @intrinsic_vrgather_vi_nxv4i32_nxv4i32_i32( @llvm.riscv.vrgather.vx.nxv4i32.i32( + undef, %0, i32 9, i32 %1) @@ -3580,6 +3728,7 @@ define @intrinsic_vrgather_vi_nxv8i32_nxv8i32_i32( @llvm.riscv.vrgather.vx.nxv8i32.i32( + undef, %0, i32 9, i32 %1) @@ -3613,6 +3762,7 @@ define @intrinsic_vrgather_vi_nxv16i32_nxv16i32_i32( @llvm.riscv.vrgather.vx.nxv16i32.i32( + undef, %0, i32 9, i32 %1) @@ -3646,6 +3796,7 @@ define @intrinsic_vrgather_vi_nxv1f16_nxv1f16_i32( @llvm.riscv.vrgather.vx.nxv1f16.i32( + undef, %0, i32 9, i32 %1) @@ -3679,6 +3830,7 @@ define @intrinsic_vrgather_vi_nxv2f16_nxv2f16_i32( @llvm.riscv.vrgather.vx.nxv2f16.i32( + undef, %0, i32 9, i32 %1) @@ -3712,6 +3864,7 @@ define @intrinsic_vrgather_vi_nxv4f16_nxv4f16_i32( @llvm.riscv.vrgather.vx.nxv4f16.i32( + undef, %0, i32 9, i32 %1) @@ -3745,6 +3898,7 @@ define @intrinsic_vrgather_vi_nxv8f16_nxv8f16_i32( @llvm.riscv.vrgather.vx.nxv8f16.i32( + undef, %0, i32 9, i32 %1) @@ -3778,6 +3932,7 @@ define @intrinsic_vrgather_vi_nxv16f16_nxv16f16_i32( @llvm.riscv.vrgather.vx.nxv16f16.i32( + undef, %0, i32 9, i32 %1) @@ -3811,6 +3966,7 @@ define @intrinsic_vrgather_vi_nxv32f16_nxv32f16_i32( @llvm.riscv.vrgather.vx.nxv32f16.i32( + undef, %0, i32 9, i32 %1) @@ -3844,6 +4000,7 @@ define @intrinsic_vrgather_vi_nxv1f32_nxv1f32_i32( @llvm.riscv.vrgather.vx.nxv1f32.i32( + undef, %0, i32 9, i32 %1) @@ -3877,6 +4034,7 @@ define @intrinsic_vrgather_vi_nxv2f32_nxv2f32_i32( @llvm.riscv.vrgather.vx.nxv2f32.i32( + undef, %0, i32 9, i32 %1) @@ -3910,6 +4068,7 @@ define @intrinsic_vrgather_vi_nxv4f32_nxv4f32_i32( @llvm.riscv.vrgather.vx.nxv4f32.i32( + undef, %0, i32 9, i32 %1) @@ -3943,6 +4102,7 @@ define @intrinsic_vrgather_vi_nxv8f32_nxv8f32_i32( @llvm.riscv.vrgather.vx.nxv8f32.i32( + undef, %0, i32 9, i32 %1) @@ -3976,6 +4136,7 @@ define @intrinsic_vrgather_vi_nxv16f32_nxv16f32_i32( @llvm.riscv.vrgather.vx.nxv16f32.i32( + undef, %0, i32 9, i32 %1) @@ -4009,6 +4170,7 @@ define @intrinsic_vrgather_vi_nxv1f64_nxv1f64_i32( @llvm.riscv.vrgather.vx.nxv1f64.i32( + undef, %0, i32 9, i32 %1) @@ -4042,6 +4204,7 @@ define @intrinsic_vrgather_vi_nxv2f64_nxv2f64_i32( @llvm.riscv.vrgather.vx.nxv2f64.i32( + undef, %0, i32 9, i32 %1) @@ -4075,6 +4238,7 @@ define @intrinsic_vrgather_vi_nxv4f64_nxv4f64_i32( @llvm.riscv.vrgather.vx.nxv4f64.i32( + undef, %0, i32 9, i32 %1) @@ -4108,6 +4272,7 @@ define @intrinsic_vrgather_vi_nxv8f64_nxv8f64_i32( @llvm.riscv.vrgather.vx.nxv8f64.i32( + undef, %0, i32 9, i32 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll index a15e7ba..3a9ab1e 100644 --- 
a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll @@ -4,6 +4,7 @@ declare @llvm.riscv.vrgather.vv.nxv1i8.i64( , , + , i64); define @intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { @@ -15,6 +16,7 @@ define @intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vrgather.vv.nxv1i8.i64( + undef, %0, %1, i64 %2) @@ -50,6 +52,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv2i8.i64( , , + , i64); define @intrinsic_vrgather_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { @@ -61,6 +64,7 @@ define @intrinsic_vrgather_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vrgather.vv.nxv2i8.i64( + undef, %0, %1, i64 %2) @@ -96,6 +100,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv4i8.i64( , , + , i64); define @intrinsic_vrgather_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { @@ -107,6 +112,7 @@ define @intrinsic_vrgather_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vrgather.vv.nxv4i8.i64( + undef, %0, %1, i64 %2) @@ -142,6 +148,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv8i8.i64( , , + , i64); define @intrinsic_vrgather_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { @@ -153,6 +160,7 @@ define @intrinsic_vrgather_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vrgather.vv.nxv8i8.i64( + undef, %0, %1, i64 %2) @@ -188,6 +196,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv16i8.i64( , , + , i64); define @intrinsic_vrgather_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { @@ -199,6 +208,7 @@ define @intrinsic_vrgather_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vrgather.vv.nxv16i8.i64( + undef, %0, %1, i64 %2) @@ -234,6 +244,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv32i8.i64( , , + , i64); define @intrinsic_vrgather_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { @@ -245,6 +256,7 @@ define @intrinsic_vrgather_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vrgather.vv.nxv32i8.i64( + undef, %0, %1, i64 %2) @@ -280,6 +292,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv64i8.i64( , , + , i64); define @intrinsic_vrgather_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { @@ -291,6 +304,7 @@ define @intrinsic_vrgather_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vrgather.vv.nxv64i8.i64( + undef, %0, %1, i64 %2) @@ -327,6 +341,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv1i16.i64( , , + , i64); define @intrinsic_vrgather_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { @@ -338,6 +353,7 @@ define @intrinsic_vrgather_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vrgather.vv.nxv1i16.i64( + undef, %0, %1, i64 %2) @@ -373,6 +389,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv2i16.i64( , , + , i64); define @intrinsic_vrgather_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { @@ -384,6 +401,7 @@ define @intrinsic_vrgather_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vrgather.vv.nxv2i16.i64( + undef, %0, %1, i64 %2) @@ -419,6 +437,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv4i16.i64( , , + , i64); define @intrinsic_vrgather_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { @@ -430,6 +449,7 @@ define @intrinsic_vrgather_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vrgather.vv.nxv4i16.i64( + undef, %0, %1, i64 %2) @@ -465,6 +485,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv8i16.i64( , , + , i64); define @intrinsic_vrgather_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { @@ -476,6 +497,7 @@ define @intrinsic_vrgather_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vrgather.vv.nxv8i16.i64( + undef, %0, %1, i64 %2) @@ -511,6 +533,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv16i16.i64( , , + , i64); define @intrinsic_vrgather_vv_nxv16i16_nxv16i16_nxv16i16( %0, 
%1, i64 %2) nounwind { @@ -522,6 +545,7 @@ define @intrinsic_vrgather_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vrgather.vv.nxv16i16.i64( + undef, %0, %1, i64 %2) @@ -557,6 +581,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv32i16.i64( , , + , i64); define @intrinsic_vrgather_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { @@ -568,6 +593,7 @@ define @intrinsic_vrgather_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vrgather.vv.nxv32i16.i64( + undef, %0, %1, i64 %2) @@ -604,6 +630,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv1i32.i64( , , + , i64); define @intrinsic_vrgather_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { @@ -615,6 +642,7 @@ define @intrinsic_vrgather_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vrgather.vv.nxv1i32.i64( + undef, %0, %1, i64 %2) @@ -650,6 +678,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv2i32.i64( , , + , i64); define @intrinsic_vrgather_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { @@ -661,6 +690,7 @@ define @intrinsic_vrgather_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vrgather.vv.nxv2i32.i64( + undef, %0, %1, i64 %2) @@ -696,6 +726,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv4i32.i64( , , + , i64); define @intrinsic_vrgather_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { @@ -707,6 +738,7 @@ define @intrinsic_vrgather_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vrgather.vv.nxv4i32.i64( + undef, %0, %1, i64 %2) @@ -742,6 +774,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv8i32.i64( , , + , i64); define @intrinsic_vrgather_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { @@ -753,6 +786,7 @@ define @intrinsic_vrgather_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vrgather.vv.nxv8i32.i64( + undef, %0, %1, i64 %2) @@ -788,6 +822,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv16i32.i64( , , + , i64); define @intrinsic_vrgather_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { @@ -799,6 +834,7 @@ define @intrinsic_vrgather_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vrgather.vv.nxv16i32.i64( + undef, %0, %1, i64 %2) @@ -835,6 +871,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv1i64.i64( , , + , i64); define @intrinsic_vrgather_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { @@ -846,6 +883,7 @@ define @intrinsic_vrgather_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vrgather.vv.nxv1i64.i64( + undef, %0, %1, i64 %2) @@ -881,6 +919,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv2i64.i64( , , + , i64); define @intrinsic_vrgather_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { @@ -892,6 +931,7 @@ define @intrinsic_vrgather_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vrgather.vv.nxv2i64.i64( + undef, %0, %1, i64 %2) @@ -927,6 +967,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv4i64.i64( , , + , i64); define @intrinsic_vrgather_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { @@ -938,6 +979,7 @@ define @intrinsic_vrgather_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vrgather.vv.nxv4i64.i64( + undef, %0, %1, i64 %2) @@ -973,6 +1015,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv8i64.i64( , , + , i64); define @intrinsic_vrgather_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { @@ -984,6 +1027,7 @@ define @intrinsic_vrgather_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vrgather.vv.nxv8i64.i64( + undef, %0, %1, i64 %2) @@ -1019,6 +1063,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv1f16.i64( , + , , i64); @@ -1031,6 +1076,7 @@ define @intrinsic_vrgather_vv_nxv1f16_nxv1f16_nxv1i16( @llvm.riscv.vrgather.vv.nxv1f16.i64( + undef, %0, %1, i64 %2) @@ -1065,6 +1111,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv2f16.i64( , + , , i64); @@ 
-1077,6 +1124,7 @@ define @intrinsic_vrgather_vv_nxv2f16_nxv2f16_nxv2i16( @llvm.riscv.vrgather.vv.nxv2f16.i64( + undef, %0, %1, i64 %2) @@ -1111,6 +1159,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv4f16.i64( , + , , i64); @@ -1123,6 +1172,7 @@ define @intrinsic_vrgather_vv_nxv4f16_nxv4f16_nxv4i16( @llvm.riscv.vrgather.vv.nxv4f16.i64( + undef, %0, %1, i64 %2) @@ -1157,6 +1207,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv8f16.i64( , + , , i64); @@ -1169,6 +1220,7 @@ define @intrinsic_vrgather_vv_nxv8f16_nxv8f16_nxv8i16( @llvm.riscv.vrgather.vv.nxv8f16.i64( + undef, %0, %1, i64 %2) @@ -1203,6 +1255,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv16f16.i64( , + , , i64); @@ -1215,6 +1268,7 @@ define @intrinsic_vrgather_vv_nxv16f16_nxv16f16_nxv16i16( @llvm.riscv.vrgather.vv.nxv16f16.i64( + undef, %0, %1, i64 %2) @@ -1249,6 +1303,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv32f16.i64( , + , , i64); @@ -1261,6 +1316,7 @@ define @intrinsic_vrgather_vv_nxv32f16_nxv32f16_nxv32i16( @llvm.riscv.vrgather.vv.nxv32f16.i64( + undef, %0, %1, i64 %2) @@ -1296,6 +1352,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv1f32.i64( , + , , i64); @@ -1308,6 +1365,7 @@ define @intrinsic_vrgather_vv_nxv1f32_nxv1f32_nxv1i32( @llvm.riscv.vrgather.vv.nxv1f32.i64( + undef, %0, %1, i64 %2) @@ -1342,6 +1400,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv2f32.i64( , + , , i64); @@ -1354,6 +1413,7 @@ define @intrinsic_vrgather_vv_nxv2f32_nxv2f32_nxv2i32( @llvm.riscv.vrgather.vv.nxv2f32.i64( + undef, %0, %1, i64 %2) @@ -1388,6 +1448,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv4f32.i64( , + , , i64); @@ -1400,6 +1461,7 @@ define @intrinsic_vrgather_vv_nxv4f32_nxv4f32_nxv4i32( @llvm.riscv.vrgather.vv.nxv4f32.i64( + undef, %0, %1, i64 %2) @@ -1434,6 +1496,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv8f32.i64( , + , , i64); @@ -1446,6 +1509,7 @@ define @intrinsic_vrgather_vv_nxv8f32_nxv8f32_nxv8i32( @llvm.riscv.vrgather.vv.nxv8f32.i64( + undef, %0, %1, i64 %2) @@ -1480,6 +1544,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv16f32.i64( , + , , i64); @@ -1492,6 +1557,7 @@ define @intrinsic_vrgather_vv_nxv16f32_nxv16f32_nxv16i32(< ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv16f32.i64( + undef, %0, %1, i64 %2) @@ -1527,6 +1593,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv1f64.i64( , + , , i64); @@ -1539,6 +1606,7 @@ define @intrinsic_vrgather_vv_nxv1f64_nxv1f64_nxv1i64( @llvm.riscv.vrgather.vv.nxv1f64.i64( + undef, %0, %1, i64 %2) @@ -1573,6 +1641,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv2f64.i64( , + , , i64); @@ -1585,6 +1654,7 @@ define @intrinsic_vrgather_vv_nxv2f64_nxv2f64_nxv2i64( @llvm.riscv.vrgather.vv.nxv2f64.i64( + undef, %0, %1, i64 %2) @@ -1619,6 +1689,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv4f64.i64( , + , , i64); @@ -1631,6 +1702,7 @@ define @intrinsic_vrgather_vv_nxv4f64_nxv4f64_nxv4i64( @llvm.riscv.vrgather.vv.nxv4f64.i64( + undef, %0, %1, i64 %2) @@ -1665,6 +1737,7 @@ entry: declare @llvm.riscv.vrgather.vv.nxv8f64.i64( , + , , i64); @@ -1677,6 +1750,7 @@ define @intrinsic_vrgather_vv_nxv8f64_nxv8f64_nxv8i64( @llvm.riscv.vrgather.vv.nxv8f64.i64( + undef, %0, %1, i64 %2) @@ -1712,6 +1786,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv1i8.i64( , + , i64, i64); @@ -1724,6 +1799,7 @@ define @intrinsic_vrgather_vx_nxv1i8_nxv1i8_i64( @llvm.riscv.vrgather.vx.nxv1i8.i64( + undef, %0, i64 %1, i64 %2) @@ -1758,6 +1834,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv2i8.i64( , + , i64, i64); @@ -1770,6 +1847,7 @@ define @intrinsic_vrgather_vx_nxv2i8_nxv2i8_i64( 
@llvm.riscv.vrgather.vx.nxv2i8.i64( + undef, %0, i64 %1, i64 %2) @@ -1804,6 +1882,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv4i8.i64( , + , i64, i64); @@ -1816,6 +1895,7 @@ define @intrinsic_vrgather_vx_nxv4i8_nxv4i8_i64( @llvm.riscv.vrgather.vx.nxv4i8.i64( + undef, %0, i64 %1, i64 %2) @@ -1850,6 +1930,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv8i8.i64( , + , i64, i64); @@ -1862,6 +1943,7 @@ define @intrinsic_vrgather_vx_nxv8i8_nxv8i8_i64( @llvm.riscv.vrgather.vx.nxv8i8.i64( + undef, %0, i64 %1, i64 %2) @@ -1896,6 +1978,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv16i8.i64( , + , i64, i64); @@ -1908,6 +1991,7 @@ define @intrinsic_vrgather_vx_nxv16i8_nxv16i8_i64( @llvm.riscv.vrgather.vx.nxv16i8.i64( + undef, %0, i64 %1, i64 %2) @@ -1942,6 +2026,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv32i8.i64( , + , i64, i64); @@ -1954,6 +2039,7 @@ define @intrinsic_vrgather_vx_nxv32i8_nxv32i8_i64( @llvm.riscv.vrgather.vx.nxv32i8.i64( + undef, %0, i64 %1, i64 %2) @@ -1988,6 +2074,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv64i8.i64( , + , i64, i64); @@ -2000,6 +2087,7 @@ define @intrinsic_vrgather_vx_nxv64i8_nxv64i8_i64( @llvm.riscv.vrgather.vx.nxv64i8.i64( + undef, %0, i64 %1, i64 %2) @@ -2034,6 +2122,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv1i16.i64( , + , i64, i64); @@ -2046,6 +2135,7 @@ define @intrinsic_vrgather_vx_nxv1i16_nxv1i16_i64( @llvm.riscv.vrgather.vx.nxv1i16.i64( + undef, %0, i64 %1, i64 %2) @@ -2080,6 +2170,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv2i16.i64( , + , i64, i64); @@ -2092,6 +2183,7 @@ define @intrinsic_vrgather_vx_nxv2i16_nxv2i16_i64( @llvm.riscv.vrgather.vx.nxv2i16.i64( + undef, %0, i64 %1, i64 %2) @@ -2126,6 +2218,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv4i16.i64( , + , i64, i64); @@ -2138,6 +2231,7 @@ define @intrinsic_vrgather_vx_nxv4i16_nxv4i16_i64( @llvm.riscv.vrgather.vx.nxv4i16.i64( + undef, %0, i64 %1, i64 %2) @@ -2172,6 +2266,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv8i16.i64( , + , i64, i64); @@ -2184,6 +2279,7 @@ define @intrinsic_vrgather_vx_nxv8i16_nxv8i16_i64( @llvm.riscv.vrgather.vx.nxv8i16.i64( + undef, %0, i64 %1, i64 %2) @@ -2218,6 +2314,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv16i16.i64( , + , i64, i64); @@ -2230,6 +2327,7 @@ define @intrinsic_vrgather_vx_nxv16i16_nxv16i16_i64( @llvm.riscv.vrgather.vx.nxv16i16.i64( + undef, %0, i64 %1, i64 %2) @@ -2264,6 +2362,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv32i16.i64( , + , i64, i64); @@ -2276,6 +2375,7 @@ define @intrinsic_vrgather_vx_nxv32i16_nxv32i16_i64( @llvm.riscv.vrgather.vx.nxv32i16.i64( + undef, %0, i64 %1, i64 %2) @@ -2310,6 +2410,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv1i32.i64( , + , i64, i64); @@ -2322,6 +2423,7 @@ define @intrinsic_vrgather_vx_nxv1i32_nxv1i32_i64( @llvm.riscv.vrgather.vx.nxv1i32.i64( + undef, %0, i64 %1, i64 %2) @@ -2356,6 +2458,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv2i32.i64( , + , i64, i64); @@ -2368,6 +2471,7 @@ define @intrinsic_vrgather_vx_nxv2i32_nxv2i32_i64( @llvm.riscv.vrgather.vx.nxv2i32.i64( + undef, %0, i64 %1, i64 %2) @@ -2402,6 +2506,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv4i32.i64( , + , i64, i64); @@ -2414,6 +2519,7 @@ define @intrinsic_vrgather_vx_nxv4i32_nxv4i32_i64( @llvm.riscv.vrgather.vx.nxv4i32.i64( + undef, %0, i64 %1, i64 %2) @@ -2448,6 +2554,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv8i32.i64( , + , i64, i64); @@ -2460,6 +2567,7 @@ define @intrinsic_vrgather_vx_nxv8i32_nxv8i32_i64( @llvm.riscv.vrgather.vx.nxv8i32.i64( + undef, %0, i64 %1, i64 %2) @@ -2494,6 +2602,7 @@ entry: declare 
@llvm.riscv.vrgather.vx.nxv16i32.i64( , + , i64, i64); @@ -2506,6 +2615,7 @@ define @intrinsic_vrgather_vx_nxv16i32_nxv16i32_i64( @llvm.riscv.vrgather.vx.nxv16i32.i64( + undef, %0, i64 %1, i64 %2) @@ -2540,6 +2650,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv1i64.i64( , + , i64, i64); @@ -2552,6 +2663,7 @@ define @intrinsic_vrgather_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vrgather.vx.nxv1i64.i64( + undef, %0, i64 %1, i64 %2) @@ -2586,6 +2698,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv2i64.i64( , + , i64, i64); @@ -2598,6 +2711,7 @@ define @intrinsic_vrgather_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vrgather.vx.nxv2i64.i64( + undef, %0, i64 %1, i64 %2) @@ -2632,6 +2746,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv4i64.i64( , + , i64, i64); @@ -2644,6 +2759,7 @@ define @intrinsic_vrgather_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vrgather.vx.nxv4i64.i64( + undef, %0, i64 %1, i64 %2) @@ -2678,6 +2794,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv8i64.i64( , + , i64, i64); @@ -2690,6 +2807,7 @@ define @intrinsic_vrgather_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vrgather.vx.nxv8i64.i64( + undef, %0, i64 %1, i64 %2) @@ -2724,6 +2842,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv1f16.i64( , + , i64, i64); @@ -2736,6 +2855,7 @@ define @intrinsic_vrgather_vx_nxv1f16_nxv1f16_i64( @llvm.riscv.vrgather.vx.nxv1f16.i64( + undef, %0, i64 %1, i64 %2) @@ -2770,6 +2890,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv2f16.i64( , + , i64, i64); @@ -2782,6 +2903,7 @@ define @intrinsic_vrgather_vx_nxv2f16_nxv2f16_i64( @llvm.riscv.vrgather.vx.nxv2f16.i64( + undef, %0, i64 %1, i64 %2) @@ -2816,6 +2938,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv4f16.i64( , + , i64, i64); @@ -2828,6 +2951,7 @@ define @intrinsic_vrgather_vx_nxv4f16_nxv4f16_i64( @llvm.riscv.vrgather.vx.nxv4f16.i64( + undef, %0, i64 %1, i64 %2) @@ -2862,6 +2986,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv8f16.i64( , + , i64, i64); @@ -2874,6 +2999,7 @@ define @intrinsic_vrgather_vx_nxv8f16_nxv8f16_i64( @llvm.riscv.vrgather.vx.nxv8f16.i64( + undef, %0, i64 %1, i64 %2) @@ -2908,6 +3034,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv16f16.i64( , + , i64, i64); @@ -2920,6 +3047,7 @@ define @intrinsic_vrgather_vx_nxv16f16_nxv16f16_i64( @llvm.riscv.vrgather.vx.nxv16f16.i64( + undef, %0, i64 %1, i64 %2) @@ -2954,6 +3082,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv32f16.i64( , + , i64, i64); @@ -2966,6 +3095,7 @@ define @intrinsic_vrgather_vx_nxv32f16_nxv32f16_i64( @llvm.riscv.vrgather.vx.nxv32f16.i64( + undef, %0, i64 %1, i64 %2) @@ -3000,6 +3130,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv1f32.i64( , + , i64, i64); @@ -3012,6 +3143,7 @@ define @intrinsic_vrgather_vx_nxv1f32_nxv1f32_i64( @llvm.riscv.vrgather.vx.nxv1f32.i64( + undef, %0, i64 %1, i64 %2) @@ -3046,6 +3178,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv2f32.i64( , + , i64, i64); @@ -3058,6 +3191,7 @@ define @intrinsic_vrgather_vx_nxv2f32_nxv2f32_i64( @llvm.riscv.vrgather.vx.nxv2f32.i64( + undef, %0, i64 %1, i64 %2) @@ -3092,6 +3226,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv4f32.i64( , + , i64, i64); @@ -3104,6 +3239,7 @@ define @intrinsic_vrgather_vx_nxv4f32_nxv4f32_i64( @llvm.riscv.vrgather.vx.nxv4f32.i64( + undef, %0, i64 %1, i64 %2) @@ -3138,6 +3274,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv8f32.i64( , + , i64, i64); @@ -3150,6 +3287,7 @@ define @intrinsic_vrgather_vx_nxv8f32_nxv8f32_i64( @llvm.riscv.vrgather.vx.nxv8f32.i64( + undef, %0, i64 %1, i64 %2) @@ -3184,6 +3322,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv16f32.i64( , + , i64, i64); @@ -3196,6 +3335,7 @@ define 
@intrinsic_vrgather_vx_nxv16f32_nxv16f32_i64( @llvm.riscv.vrgather.vx.nxv16f32.i64( + undef, %0, i64 %1, i64 %2) @@ -3230,6 +3370,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv1f64.i64( , + , i64, i64); @@ -3242,6 +3383,7 @@ define @intrinsic_vrgather_vx_nxv1f64_nxv1f64_i64( @llvm.riscv.vrgather.vx.nxv1f64.i64( + undef, %0, i64 %1, i64 %2) @@ -3276,6 +3418,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv2f64.i64( , + , i64, i64); @@ -3288,6 +3431,7 @@ define @intrinsic_vrgather_vx_nxv2f64_nxv2f64_i64( @llvm.riscv.vrgather.vx.nxv2f64.i64( + undef, %0, i64 %1, i64 %2) @@ -3322,6 +3466,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv4f64.i64( , + , i64, i64); @@ -3334,6 +3479,7 @@ define @intrinsic_vrgather_vx_nxv4f64_nxv4f64_i64( @llvm.riscv.vrgather.vx.nxv4f64.i64( + undef, %0, i64 %1, i64 %2) @@ -3368,6 +3514,7 @@ entry: declare @llvm.riscv.vrgather.vx.nxv8f64.i64( , + , i64, i64); @@ -3380,6 +3527,7 @@ define @intrinsic_vrgather_vx_nxv8f64_nxv8f64_i64( @llvm.riscv.vrgather.vx.nxv8f64.i64( + undef, %0, i64 %1, i64 %2) @@ -3421,6 +3569,7 @@ define @intrinsic_vrgather_vi_nxv1i8_nxv1i8_i64( @llvm.riscv.vrgather.vx.nxv1i8.i64( + undef, %0, i64 9, i64 %1) @@ -3454,6 +3603,7 @@ define @intrinsic_vrgather_vi_nxv2i8_nxv2i8_i64( @llvm.riscv.vrgather.vx.nxv2i8.i64( + undef, %0, i64 9, i64 %1) @@ -3487,6 +3637,7 @@ define @intrinsic_vrgather_vi_nxv4i8_nxv4i8_i64( @llvm.riscv.vrgather.vx.nxv4i8.i64( + undef, %0, i64 9, i64 %1) @@ -3520,6 +3671,7 @@ define @intrinsic_vrgather_vi_nxv8i8_nxv8i8_i64( @llvm.riscv.vrgather.vx.nxv8i8.i64( + undef, %0, i64 9, i64 %1) @@ -3553,6 +3705,7 @@ define @intrinsic_vrgather_vi_nxv16i8_nxv16i8_i64( @llvm.riscv.vrgather.vx.nxv16i8.i64( + undef, %0, i64 9, i64 %1) @@ -3586,6 +3739,7 @@ define @intrinsic_vrgather_vi_nxv32i8_nxv32i8_i64( @llvm.riscv.vrgather.vx.nxv32i8.i64( + undef, %0, i64 9, i64 %1) @@ -3619,6 +3773,7 @@ define @intrinsic_vrgather_vi_nxv64i8_nxv64i8_i64( @llvm.riscv.vrgather.vx.nxv64i8.i64( + undef, %0, i64 9, i64 %1) @@ -3652,6 +3807,7 @@ define @intrinsic_vrgather_vi_nxv1i16_nxv1i16_i64( @llvm.riscv.vrgather.vx.nxv1i16.i64( + undef, %0, i64 9, i64 %1) @@ -3685,6 +3841,7 @@ define @intrinsic_vrgather_vi_nxv2i16_nxv2i16_i64( @llvm.riscv.vrgather.vx.nxv2i16.i64( + undef, %0, i64 9, i64 %1) @@ -3718,6 +3875,7 @@ define @intrinsic_vrgather_vi_nxv4i16_nxv4i16_i64( @llvm.riscv.vrgather.vx.nxv4i16.i64( + undef, %0, i64 9, i64 %1) @@ -3751,6 +3909,7 @@ define @intrinsic_vrgather_vi_nxv8i16_nxv8i16_i64( @llvm.riscv.vrgather.vx.nxv8i16.i64( + undef, %0, i64 9, i64 %1) @@ -3784,6 +3943,7 @@ define @intrinsic_vrgather_vi_nxv16i16_nxv16i16_i64( @llvm.riscv.vrgather.vx.nxv16i16.i64( + undef, %0, i64 9, i64 %1) @@ -3817,6 +3977,7 @@ define @intrinsic_vrgather_vi_nxv32i16_nxv32i16_i64( @llvm.riscv.vrgather.vx.nxv32i16.i64( + undef, %0, i64 9, i64 %1) @@ -3850,6 +4011,7 @@ define @intrinsic_vrgather_vi_nxv1i32_nxv1i32_i64( @llvm.riscv.vrgather.vx.nxv1i32.i64( + undef, %0, i64 9, i64 %1) @@ -3883,6 +4045,7 @@ define @intrinsic_vrgather_vi_nxv2i32_nxv2i32_i64( @llvm.riscv.vrgather.vx.nxv2i32.i64( + undef, %0, i64 9, i64 %1) @@ -3916,6 +4079,7 @@ define @intrinsic_vrgather_vi_nxv4i32_nxv4i32_i64( @llvm.riscv.vrgather.vx.nxv4i32.i64( + undef, %0, i64 9, i64 %1) @@ -3949,6 +4113,7 @@ define @intrinsic_vrgather_vi_nxv8i32_nxv8i32_i64( @llvm.riscv.vrgather.vx.nxv8i32.i64( + undef, %0, i64 9, i64 %1) @@ -3982,6 +4147,7 @@ define @intrinsic_vrgather_vi_nxv16i32_nxv16i32_i64( @llvm.riscv.vrgather.vx.nxv16i32.i64( + undef, %0, i64 9, i64 %1) @@ -4015,6 +4181,7 @@ define 
@intrinsic_vrgather_vi_nxv1i64_nxv1i64_i64( @llvm.riscv.vrgather.vx.nxv1i64.i64( + undef, %0, i64 9, i64 %1) @@ -4048,6 +4215,7 @@ define @intrinsic_vrgather_vi_nxv2i64_nxv2i64_i64( @llvm.riscv.vrgather.vx.nxv2i64.i64( + undef, %0, i64 9, i64 %1) @@ -4081,6 +4249,7 @@ define @intrinsic_vrgather_vi_nxv4i64_nxv4i64_i64( @llvm.riscv.vrgather.vx.nxv4i64.i64( + undef, %0, i64 9, i64 %1) @@ -4114,6 +4283,7 @@ define @intrinsic_vrgather_vi_nxv8i64_nxv8i64_i64( @llvm.riscv.vrgather.vx.nxv8i64.i64( + undef, %0, i64 9, i64 %1) @@ -4147,6 +4317,7 @@ define @intrinsic_vrgather_vi_nxv1f16_nxv1f16_i64( @llvm.riscv.vrgather.vx.nxv1f16.i64( + undef, %0, i64 9, i64 %1) @@ -4180,6 +4351,7 @@ define @intrinsic_vrgather_vi_nxv2f16_nxv2f16_i64( @llvm.riscv.vrgather.vx.nxv2f16.i64( + undef, %0, i64 9, i64 %1) @@ -4213,6 +4385,7 @@ define @intrinsic_vrgather_vi_nxv4f16_nxv4f16_i64( @llvm.riscv.vrgather.vx.nxv4f16.i64( + undef, %0, i64 9, i64 %1) @@ -4246,6 +4419,7 @@ define @intrinsic_vrgather_vi_nxv8f16_nxv8f16_i64( @llvm.riscv.vrgather.vx.nxv8f16.i64( + undef, %0, i64 9, i64 %1) @@ -4279,6 +4453,7 @@ define @intrinsic_vrgather_vi_nxv16f16_nxv16f16_i64( @llvm.riscv.vrgather.vx.nxv16f16.i64( + undef, %0, i64 9, i64 %1) @@ -4312,6 +4487,7 @@ define @intrinsic_vrgather_vi_nxv32f16_nxv32f16_i64( @llvm.riscv.vrgather.vx.nxv32f16.i64( + undef, %0, i64 9, i64 %1) @@ -4345,6 +4521,7 @@ define @intrinsic_vrgather_vi_nxv1f32_nxv1f32_i64( @llvm.riscv.vrgather.vx.nxv1f32.i64( + undef, %0, i64 9, i64 %1) @@ -4378,6 +4555,7 @@ define @intrinsic_vrgather_vi_nxv2f32_nxv2f32_i64( @llvm.riscv.vrgather.vx.nxv2f32.i64( + undef, %0, i64 9, i64 %1) @@ -4411,6 +4589,7 @@ define @intrinsic_vrgather_vi_nxv4f32_nxv4f32_i64( @llvm.riscv.vrgather.vx.nxv4f32.i64( + undef, %0, i64 9, i64 %1) @@ -4444,6 +4623,7 @@ define @intrinsic_vrgather_vi_nxv8f32_nxv8f32_i64( @llvm.riscv.vrgather.vx.nxv8f32.i64( + undef, %0, i64 9, i64 %1) @@ -4477,6 +4657,7 @@ define @intrinsic_vrgather_vi_nxv16f32_nxv16f32_i64( @llvm.riscv.vrgather.vx.nxv16f32.i64( + undef, %0, i64 9, i64 %1) @@ -4510,6 +4691,7 @@ define @intrinsic_vrgather_vi_nxv1f64_nxv1f64_i64( @llvm.riscv.vrgather.vx.nxv1f64.i64( + undef, %0, i64 9, i64 %1) @@ -4543,6 +4725,7 @@ define @intrinsic_vrgather_vi_nxv2f64_nxv2f64_i64( @llvm.riscv.vrgather.vx.nxv2f64.i64( + undef, %0, i64 9, i64 %1) @@ -4576,6 +4759,7 @@ define @intrinsic_vrgather_vi_nxv4f64_nxv4f64_i64( @llvm.riscv.vrgather.vx.nxv4f64.i64( + undef, %0, i64 9, i64 %1) @@ -4609,6 +4793,7 @@ define @intrinsic_vrgather_vi_nxv8f64_nxv8f64_i64( @llvm.riscv.vrgather.vx.nxv8f64.i64( + undef, %0, i64 9, i64 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll index 720d6d8..522c617 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll @@ -3,6 +3,7 @@ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vrgatherei16.vv.nxv1i8( , + , , i32); @@ -15,6 +16,7 @@ define @intrinsic_vrgatherei16_vv_nxv1i8_nxv1i8( @llvm.riscv.vrgatherei16.vv.nxv1i8( + undef, %0, %1, i32 %2) @@ -49,6 +51,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv2i8( , + , , i32); @@ -61,6 +64,7 @@ define @intrinsic_vrgatherei16_vv_nxv2i8_nxv2i8( @llvm.riscv.vrgatherei16.vv.nxv2i8( + undef, %0, %1, i32 %2) @@ -95,6 +99,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv4i8( , + , , i32); @@ -107,6 +112,7 @@ define @intrinsic_vrgatherei16_vv_nxv4i8_nxv4i8( @llvm.riscv.vrgatherei16.vv.nxv4i8( + undef, %0, %1, i32 %2) @@ -141,6 +147,7 @@ entry: declare 
@llvm.riscv.vrgatherei16.vv.nxv8i8( , + , , i32); @@ -153,6 +160,7 @@ define @intrinsic_vrgatherei16_vv_nxv8i8_nxv8i8( @llvm.riscv.vrgatherei16.vv.nxv8i8( + undef, %0, %1, i32 %2) @@ -187,6 +195,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv16i8( , + , , i32); @@ -199,6 +208,7 @@ define @intrinsic_vrgatherei16_vv_nxv16i8_nxv16i8( @llvm.riscv.vrgatherei16.vv.nxv16i8( + undef, %0, %1, i32 %2) @@ -233,6 +243,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv32i8( , + , , i32); @@ -245,6 +256,7 @@ define @intrinsic_vrgatherei16_vv_nxv32i8_nxv32i8( @llvm.riscv.vrgatherei16.vv.nxv32i8( + undef, %0, %1, i32 %2) @@ -280,6 +292,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv1i16( , , + , i32); define @intrinsic_vrgatherei16_vv_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { @@ -291,6 +304,7 @@ define @intrinsic_vrgatherei16_vv_nxv1i16_nxv1i16( @llvm.riscv.vrgatherei16.vv.nxv1i16( + undef, %0, %1, i32 %2) @@ -326,6 +340,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv2i16( , , + , i32); define @intrinsic_vrgatherei16_vv_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { @@ -337,6 +352,7 @@ define @intrinsic_vrgatherei16_vv_nxv2i16_nxv2i16( @llvm.riscv.vrgatherei16.vv.nxv2i16( + undef, %0, %1, i32 %2) @@ -372,6 +388,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv4i16( , , + , i32); define @intrinsic_vrgatherei16_vv_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { @@ -383,6 +400,7 @@ define @intrinsic_vrgatherei16_vv_nxv4i16_nxv4i16( @llvm.riscv.vrgatherei16.vv.nxv4i16( + undef, %0, %1, i32 %2) @@ -418,6 +436,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv8i16( , , + , i32); define @intrinsic_vrgatherei16_vv_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { @@ -429,6 +448,7 @@ define @intrinsic_vrgatherei16_vv_nxv8i16_nxv8i16( @llvm.riscv.vrgatherei16.vv.nxv8i16( + undef, %0, %1, i32 %2) @@ -464,6 +484,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv16i16( , , + , i32); define @intrinsic_vrgatherei16_vv_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { @@ -475,6 +496,7 @@ define @intrinsic_vrgatherei16_vv_nxv16i16_nxv16i16( @llvm.riscv.vrgatherei16.vv.nxv16i16( + undef, %0, %1, i32 %2) @@ -510,6 +532,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv32i16( , , + , i32); define @intrinsic_vrgatherei16_vv_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { @@ -521,6 +544,7 @@ define @intrinsic_vrgatherei16_vv_nxv32i16_nxv32i16( @llvm.riscv.vrgatherei16.vv.nxv32i16( + undef, %0, %1, i32 %2) @@ -556,6 +580,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv1i32( , + , , i32); @@ -568,6 +593,7 @@ define @intrinsic_vrgatherei16_vv_nxv1i32_nxv1i32( @llvm.riscv.vrgatherei16.vv.nxv1i32( + undef, %0, %1, i32 %2) @@ -602,6 +628,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv4i32( , + , , i32); @@ -614,6 +641,7 @@ define @intrinsic_vrgatherei16_vv_nxv4i32_nxv4i32( @llvm.riscv.vrgatherei16.vv.nxv4i32( + undef, %0, %1, i32 %2) @@ -648,6 +676,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv8i32( , + , , i32); @@ -660,6 +689,7 @@ define @intrinsic_vrgatherei16_vv_nxv8i32_nxv8i32( @llvm.riscv.vrgatherei16.vv.nxv8i32( + undef, %0, %1, i32 %2) @@ -694,6 +724,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv16i32( , + , , i32); @@ -706,6 +737,7 @@ define @intrinsic_vrgatherei16_vv_nxv16i32_nxv16i32( @llvm.riscv.vrgatherei16.vv.nxv16i32( + undef, %0, %1, i32 %2) @@ -741,6 +773,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv4i64( , + , , i32); @@ -753,6 +786,7 @@ define @intrinsic_vrgatherei16_vv_nxv4i64_nxv4i64( @llvm.riscv.vrgatherei16.vv.nxv4i64( + undef, %0, %1, i32 %2) @@ -787,6 +821,7 @@ entry: declare 
@llvm.riscv.vrgatherei16.vv.nxv8i64( , + , , i32); @@ -799,6 +834,7 @@ define @intrinsic_vrgatherei16_vv_nxv8i64_nxv8i64( @llvm.riscv.vrgatherei16.vv.nxv8i64( + undef, %0, %1, i32 %2) @@ -834,6 +870,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv1f16( , + , , i32); @@ -846,6 +883,7 @@ define @intrinsic_vrgatherei16_vv_nxv1f16_nxv1f16( @llvm.riscv.vrgatherei16.vv.nxv1f16( + undef, %0, %1, i32 %2) @@ -880,6 +918,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv2f16( , + , , i32); @@ -892,6 +931,7 @@ define @intrinsic_vrgatherei16_vv_nxv2f16_nxv2f16( @llvm.riscv.vrgatherei16.vv.nxv2f16( + undef, %0, %1, i32 %2) @@ -926,6 +966,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv4f16( , + , , i32); @@ -938,6 +979,7 @@ define @intrinsic_vrgatherei16_vv_nxv4f16_nxv4f16( @llvm.riscv.vrgatherei16.vv.nxv4f16( + undef, %0, %1, i32 %2) @@ -972,6 +1014,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv8f16( , + , , i32); @@ -984,6 +1027,7 @@ define @intrinsic_vrgatherei16_vv_nxv8f16_nxv8f16( @llvm.riscv.vrgatherei16.vv.nxv8f16( + undef, %0, %1, i32 %2) @@ -1018,6 +1062,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv16f16( , + , , i32); @@ -1030,6 +1075,7 @@ define @intrinsic_vrgatherei16_vv_nxv16f16_nxv16f16( @llvm.riscv.vrgatherei16.vv.nxv16f16( + undef, %0, %1, i32 %2) @@ -1064,6 +1110,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv32f16( , + , , i32); @@ -1076,6 +1123,7 @@ define @intrinsic_vrgatherei16_vv_nxv32f16_nxv32f16( @llvm.riscv.vrgatherei16.vv.nxv32f16( + undef, %0, %1, i32 %2) @@ -1111,6 +1159,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv1f32( , + , , i32); @@ -1123,6 +1172,7 @@ define @intrinsic_vrgatherei16_vv_nxv1f32_nxv1f32( @llvm.riscv.vrgatherei16.vv.nxv1f32( + undef, %0, %1, i32 %2) @@ -1157,6 +1207,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv4f32( , + , , i32); @@ -1169,6 +1220,7 @@ define @intrinsic_vrgatherei16_vv_nxv4f32_nxv4f32( @llvm.riscv.vrgatherei16.vv.nxv4f32( + undef, %0, %1, i32 %2) @@ -1203,6 +1255,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv8f32( , + , , i32); @@ -1215,6 +1268,7 @@ define @intrinsic_vrgatherei16_vv_nxv8f32_nxv8f32( @llvm.riscv.vrgatherei16.vv.nxv8f32( + undef, %0, %1, i32 %2) @@ -1249,6 +1303,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv16f32( , + , , i32); @@ -1261,6 +1316,7 @@ define @intrinsic_vrgatherei16_vv_nxv16f32_nxv16f32( @llvm.riscv.vrgatherei16.vv.nxv16f32( + undef, %0, %1, i32 %2) @@ -1296,6 +1352,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv4f64( , + , , i32); @@ -1308,6 +1365,7 @@ define @intrinsic_vrgatherei16_vv_nxv4f64_nxv4f64( @llvm.riscv.vrgatherei16.vv.nxv4f64( + undef, %0, %1, i32 %2) @@ -1342,6 +1400,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv8f64( , + , , i32); @@ -1354,6 +1413,7 @@ define @intrinsic_vrgatherei16_vv_nxv8f64_nxv8f64( @llvm.riscv.vrgatherei16.vv.nxv8f64( + undef, %0, %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll index 9592630..c31e6e6 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll @@ -3,6 +3,7 @@ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vrgatherei16.vv.nxv1i8( , + , , i64); @@ -15,6 +16,7 @@ define @intrinsic_vrgatherei16_vv_nxv1i8_nxv1i8( @llvm.riscv.vrgatherei16.vv.nxv1i8( + undef, %0, %1, i64 %2) @@ -49,6 +51,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv2i8( , + , , i64); @@ -61,6 +64,7 @@ define @intrinsic_vrgatherei16_vv_nxv2i8_nxv2i8( @llvm.riscv.vrgatherei16.vv.nxv2i8( + undef, %0, %1, i64 %2) @@ 
-95,6 +99,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv4i8( , + , , i64); @@ -107,6 +112,7 @@ define @intrinsic_vrgatherei16_vv_nxv4i8_nxv4i8( @llvm.riscv.vrgatherei16.vv.nxv4i8( + undef, %0, %1, i64 %2) @@ -141,6 +147,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv8i8( , + , , i64); @@ -153,6 +160,7 @@ define @intrinsic_vrgatherei16_vv_nxv8i8_nxv8i8( @llvm.riscv.vrgatherei16.vv.nxv8i8( + undef, %0, %1, i64 %2) @@ -187,6 +195,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv16i8( , + , , i64); @@ -199,6 +208,7 @@ define @intrinsic_vrgatherei16_vv_nxv16i8_nxv16i8( @llvm.riscv.vrgatherei16.vv.nxv16i8( + undef, %0, %1, i64 %2) @@ -233,6 +243,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv32i8( , + , , i64); @@ -245,6 +256,7 @@ define @intrinsic_vrgatherei16_vv_nxv32i8_nxv32i8( @llvm.riscv.vrgatherei16.vv.nxv32i8( + undef, %0, %1, i64 %2) @@ -280,6 +292,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv1i16( , , + , i64); define @intrinsic_vrgatherei16_vv_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { @@ -291,6 +304,7 @@ define @intrinsic_vrgatherei16_vv_nxv1i16_nxv1i16( @llvm.riscv.vrgatherei16.vv.nxv1i16( + undef, %0, %1, i64 %2) @@ -326,6 +340,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv2i16( , , + , i64); define @intrinsic_vrgatherei16_vv_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { @@ -337,6 +352,7 @@ define @intrinsic_vrgatherei16_vv_nxv2i16_nxv2i16( @llvm.riscv.vrgatherei16.vv.nxv2i16( + undef, %0, %1, i64 %2) @@ -372,6 +388,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv4i16( , , + , i64); define @intrinsic_vrgatherei16_vv_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { @@ -383,6 +400,7 @@ define @intrinsic_vrgatherei16_vv_nxv4i16_nxv4i16( @llvm.riscv.vrgatherei16.vv.nxv4i16( + undef, %0, %1, i64 %2) @@ -418,6 +436,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv8i16( , , + , i64); define @intrinsic_vrgatherei16_vv_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { @@ -429,6 +448,7 @@ define @intrinsic_vrgatherei16_vv_nxv8i16_nxv8i16( @llvm.riscv.vrgatherei16.vv.nxv8i16( + undef, %0, %1, i64 %2) @@ -464,6 +484,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv16i16( , , + , i64); define @intrinsic_vrgatherei16_vv_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { @@ -475,6 +496,7 @@ define @intrinsic_vrgatherei16_vv_nxv16i16_nxv16i16( @llvm.riscv.vrgatherei16.vv.nxv16i16( + undef, %0, %1, i64 %2) @@ -510,6 +532,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv32i16( , , + , i64); define @intrinsic_vrgatherei16_vv_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { @@ -521,6 +544,7 @@ define @intrinsic_vrgatherei16_vv_nxv32i16_nxv32i16( @llvm.riscv.vrgatherei16.vv.nxv32i16( + undef, %0, %1, i64 %2) @@ -556,6 +580,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv1i32( , + , , i64); @@ -568,6 +593,7 @@ define @intrinsic_vrgatherei16_vv_nxv1i32_nxv1i32( @llvm.riscv.vrgatherei16.vv.nxv1i32( + undef, %0, %1, i64 %2) @@ -602,6 +628,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv4i32( , + , , i64); @@ -614,6 +641,7 @@ define @intrinsic_vrgatherei16_vv_nxv4i32_nxv4i32( @llvm.riscv.vrgatherei16.vv.nxv4i32( + undef, %0, %1, i64 %2) @@ -648,6 +676,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv8i32( , + , , i64); @@ -660,6 +689,7 @@ define @intrinsic_vrgatherei16_vv_nxv8i32_nxv8i32( @llvm.riscv.vrgatherei16.vv.nxv8i32( + undef, %0, %1, i64 %2) @@ -694,6 +724,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv16i32( , + , , i64); @@ -706,6 +737,7 @@ define @intrinsic_vrgatherei16_vv_nxv16i32_nxv16i32( @llvm.riscv.vrgatherei16.vv.nxv16i32( + undef, %0, %1, i64 %2) @@ -741,6 +773,7 
@@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv4i64( , + , , i64); @@ -753,6 +786,7 @@ define @intrinsic_vrgatherei16_vv_nxv4i64_nxv4i64( @llvm.riscv.vrgatherei16.vv.nxv4i64( + undef, %0, %1, i64 %2) @@ -787,6 +821,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv8i64( , + , , i64); @@ -799,6 +834,7 @@ define @intrinsic_vrgatherei16_vv_nxv8i64_nxv8i64( @llvm.riscv.vrgatherei16.vv.nxv8i64( + undef, %0, %1, i64 %2) @@ -834,6 +870,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv1f16( , + , , i64); @@ -846,6 +883,7 @@ define @intrinsic_vrgatherei16_vv_nxv1f16_nxv1f16( @llvm.riscv.vrgatherei16.vv.nxv1f16( + undef, %0, %1, i64 %2) @@ -880,6 +918,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv2f16( , + , , i64); @@ -892,6 +931,7 @@ define @intrinsic_vrgatherei16_vv_nxv2f16_nxv2f16( @llvm.riscv.vrgatherei16.vv.nxv2f16( + undef, %0, %1, i64 %2) @@ -926,6 +966,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv4f16( , + , , i64); @@ -938,6 +979,7 @@ define @intrinsic_vrgatherei16_vv_nxv4f16_nxv4f16( @llvm.riscv.vrgatherei16.vv.nxv4f16( + undef, %0, %1, i64 %2) @@ -972,6 +1014,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv8f16( , + , , i64); @@ -984,6 +1027,7 @@ define @intrinsic_vrgatherei16_vv_nxv8f16_nxv8f16( @llvm.riscv.vrgatherei16.vv.nxv8f16( + undef, %0, %1, i64 %2) @@ -1018,6 +1062,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv16f16( , + , , i64); @@ -1030,6 +1075,7 @@ define @intrinsic_vrgatherei16_vv_nxv16f16_nxv16f16( @llvm.riscv.vrgatherei16.vv.nxv16f16( + undef, %0, %1, i64 %2) @@ -1064,6 +1110,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv32f16( , + , , i64); @@ -1076,6 +1123,7 @@ define @intrinsic_vrgatherei16_vv_nxv32f16_nxv32f16( @llvm.riscv.vrgatherei16.vv.nxv32f16( + undef, %0, %1, i64 %2) @@ -1111,6 +1159,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv1f32( , + , , i64); @@ -1123,6 +1172,7 @@ define @intrinsic_vrgatherei16_vv_nxv1f32_nxv1f32( @llvm.riscv.vrgatherei16.vv.nxv1f32( + undef, %0, %1, i64 %2) @@ -1157,6 +1207,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv4f32( , + , , i64); @@ -1169,6 +1220,7 @@ define @intrinsic_vrgatherei16_vv_nxv4f32_nxv4f32( @llvm.riscv.vrgatherei16.vv.nxv4f32( + undef, %0, %1, i64 %2) @@ -1203,6 +1255,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv8f32( , + , , i64); @@ -1215,6 +1268,7 @@ define @intrinsic_vrgatherei16_vv_nxv8f32_nxv8f32( @llvm.riscv.vrgatherei16.vv.nxv8f32( + undef, %0, %1, i64 %2) @@ -1249,6 +1303,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv16f32( , + , , i64); @@ -1261,6 +1316,7 @@ define @intrinsic_vrgatherei16_vv_nxv16f32_nxv16f32( @llvm.riscv.vrgatherei16.vv.nxv16f32( + undef, %0, %1, i64 %2) @@ -1296,6 +1352,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv4f64( , + , , i64); @@ -1308,6 +1365,7 @@ define @intrinsic_vrgatherei16_vv_nxv4f64_nxv4f64( @llvm.riscv.vrgatherei16.vv.nxv4f64( + undef, %0, %1, i64 %2) @@ -1342,6 +1400,7 @@ entry: declare @llvm.riscv.vrgatherei16.vv.nxv8f64( , + , , i64); @@ -1354,6 +1413,7 @@ define @intrinsic_vrgatherei16_vv_nxv8f64_nxv8f64( @llvm.riscv.vrgatherei16.vv.nxv8f64( + undef, %0, %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll index bd00557..422d326 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll @@ -3,6 +3,7 @@ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vrsub.nxv1i8.i8( , + , i8, i32); @@ -14,6 +15,7 @@ define @intrinsic_vrsub_vx_nxv1i8_nxv1i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrsub.nxv1i8.i8( + undef, 
%0, i8 %1, i32 %2) @@ -48,6 +50,7 @@ entry: declare @llvm.riscv.vrsub.nxv2i8.i8( , + , i8, i32); @@ -59,6 +62,7 @@ define @intrinsic_vrsub_vx_nxv2i8_nxv2i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrsub.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -93,6 +97,7 @@ entry: declare @llvm.riscv.vrsub.nxv4i8.i8( , + , i8, i32); @@ -104,6 +109,7 @@ define @intrinsic_vrsub_vx_nxv4i8_nxv4i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrsub.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -138,6 +144,7 @@ entry: declare @llvm.riscv.vrsub.nxv8i8.i8( , + , i8, i32); @@ -149,6 +156,7 @@ define @intrinsic_vrsub_vx_nxv8i8_nxv8i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrsub.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -183,6 +191,7 @@ entry: declare @llvm.riscv.vrsub.nxv16i8.i8( , + , i8, i32); @@ -194,6 +203,7 @@ define @intrinsic_vrsub_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vrsub.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -228,6 +238,7 @@ entry: declare @llvm.riscv.vrsub.nxv32i8.i8( , + , i8, i32); @@ -239,6 +250,7 @@ define @intrinsic_vrsub_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vrsub.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -273,6 +285,7 @@ entry: declare @llvm.riscv.vrsub.nxv64i8.i8( , + , i8, i32); @@ -284,6 +297,7 @@ define @intrinsic_vrsub_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vrsub.nxv64i8.i8( + undef, %0, i8 %1, i32 %2) @@ -318,6 +332,7 @@ entry: declare @llvm.riscv.vrsub.nxv1i16.i16( , + , i16, i32); @@ -329,6 +344,7 @@ define @intrinsic_vrsub_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vrsub.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -363,6 +379,7 @@ entry: declare @llvm.riscv.vrsub.nxv2i16.i16( , + , i16, i32); @@ -374,6 +391,7 @@ define @intrinsic_vrsub_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vrsub.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -408,6 +426,7 @@ entry: declare @llvm.riscv.vrsub.nxv4i16.i16( , + , i16, i32); @@ -419,6 +438,7 @@ define @intrinsic_vrsub_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vrsub.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -453,6 +473,7 @@ entry: declare @llvm.riscv.vrsub.nxv8i16.i16( , + , i16, i32); @@ -464,6 +485,7 @@ define @intrinsic_vrsub_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vrsub.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -498,6 +520,7 @@ entry: declare @llvm.riscv.vrsub.nxv16i16.i16( , + , i16, i32); @@ -509,6 +532,7 @@ define @intrinsic_vrsub_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vrsub.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -543,6 +567,7 @@ entry: declare @llvm.riscv.vrsub.nxv32i16.i16( , + , i16, i32); @@ -554,6 +579,7 @@ define @intrinsic_vrsub_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vrsub.nxv32i16.i16( + undef, %0, i16 %1, i32 %2) @@ -588,6 +614,7 @@ entry: declare @llvm.riscv.vrsub.nxv1i32.i32( , + , i32, i32); @@ -599,6 +626,7 @@ define @intrinsic_vrsub_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vrsub.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -633,6 +661,7 @@ entry: declare @llvm.riscv.vrsub.nxv2i32.i32( , + , i32, i32); @@ -644,6 +673,7 @@ define @intrinsic_vrsub_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vrsub.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -678,6 +708,7 @@ entry: declare @llvm.riscv.vrsub.nxv4i32.i32( , + , i32, i32); @@ -689,6 +720,7 @@ define @intrinsic_vrsub_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vrsub.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -723,6 +755,7 @@ entry: declare @llvm.riscv.vrsub.nxv8i32.i32( , + , i32, i32); @@ -734,6 +767,7 @@ define @intrinsic_vrsub_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vrsub.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) @@ -768,6 +802,7 @@ entry: declare @llvm.riscv.vrsub.nxv16i32.i32( , + , i32, i32); @@ -779,6 
+814,7 @@ define @intrinsic_vrsub_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vrsub.nxv16i32.i32( + undef, %0, i32 %1, i32 %2) @@ -813,6 +849,7 @@ entry: declare @llvm.riscv.vrsub.nxv1i64.i64( , + , i64, i32); @@ -830,6 +867,7 @@ define @intrinsic_vrsub_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vrsub.nxv1i64.i64( + undef, %0, i64 %1, i32 %2) @@ -870,6 +908,7 @@ entry: declare @llvm.riscv.vrsub.nxv2i64.i64( , + , i64, i32); @@ -887,6 +926,7 @@ define @intrinsic_vrsub_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vrsub.nxv2i64.i64( + undef, %0, i64 %1, i32 %2) @@ -927,6 +967,7 @@ entry: declare @llvm.riscv.vrsub.nxv4i64.i64( , + , i64, i32); @@ -944,6 +985,7 @@ define @intrinsic_vrsub_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vrsub.nxv4i64.i64( + undef, %0, i64 %1, i32 %2) @@ -984,6 +1026,7 @@ entry: declare @llvm.riscv.vrsub.nxv8i64.i64( , + , i64, i32); @@ -1001,6 +1044,7 @@ define @intrinsic_vrsub_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vrsub.nxv8i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1047,6 +1091,7 @@ define @intrinsic_vrsub_vi_nxv1i8_nxv1i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrsub.nxv1i8.i8( + undef, %0, i8 9, i32 %1) @@ -1079,6 +1124,7 @@ define @intrinsic_vrsub_vi_nxv2i8_nxv2i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrsub.nxv2i8.i8( + undef, %0, i8 9, i32 %1) @@ -1111,6 +1157,7 @@ define @intrinsic_vrsub_vi_nxv4i8_nxv4i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrsub.nxv4i8.i8( + undef, %0, i8 9, i32 %1) @@ -1143,6 +1190,7 @@ define @intrinsic_vrsub_vi_nxv8i8_nxv8i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrsub.nxv8i8.i8( + undef, %0, i8 9, i32 %1) @@ -1175,6 +1223,7 @@ define @intrinsic_vrsub_vi_nxv16i8_nxv16i8_i8( @llvm.riscv.vrsub.nxv16i8.i8( + undef, %0, i8 9, i32 %1) @@ -1207,6 +1256,7 @@ define @intrinsic_vrsub_vi_nxv32i8_nxv32i8_i8( @llvm.riscv.vrsub.nxv32i8.i8( + undef, %0, i8 9, i32 %1) @@ -1239,6 +1289,7 @@ define @intrinsic_vrsub_vi_nxv64i8_nxv64i8_i8( @llvm.riscv.vrsub.nxv64i8.i8( + undef, %0, i8 9, i32 %1) @@ -1271,6 +1322,7 @@ define @intrinsic_vrsub_vi_nxv1i16_nxv1i16_i16( @llvm.riscv.vrsub.nxv1i16.i16( + undef, %0, i16 9, i32 %1) @@ -1303,6 +1355,7 @@ define @intrinsic_vrsub_vi_nxv2i16_nxv2i16_i16( @llvm.riscv.vrsub.nxv2i16.i16( + undef, %0, i16 9, i32 %1) @@ -1335,6 +1388,7 @@ define @intrinsic_vrsub_vi_nxv4i16_nxv4i16_i16( @llvm.riscv.vrsub.nxv4i16.i16( + undef, %0, i16 9, i32 %1) @@ -1367,6 +1421,7 @@ define @intrinsic_vrsub_vi_nxv8i16_nxv8i16_i16( @llvm.riscv.vrsub.nxv8i16.i16( + undef, %0, i16 9, i32 %1) @@ -1399,6 +1454,7 @@ define @intrinsic_vrsub_vi_nxv16i16_nxv16i16_i16( @llvm.riscv.vrsub.nxv16i16.i16( + undef, %0, i16 9, i32 %1) @@ -1431,6 +1487,7 @@ define @intrinsic_vrsub_vi_nxv32i16_nxv32i16_i16( @llvm.riscv.vrsub.nxv32i16.i16( + undef, %0, i16 9, i32 %1) @@ -1463,6 +1520,7 @@ define @intrinsic_vrsub_vi_nxv1i32_nxv1i32_i32( @llvm.riscv.vrsub.nxv1i32.i32( + undef, %0, i32 9, i32 %1) @@ -1495,6 +1553,7 @@ define @intrinsic_vrsub_vi_nxv2i32_nxv2i32_i32( @llvm.riscv.vrsub.nxv2i32.i32( + undef, %0, i32 9, i32 %1) @@ -1527,6 +1586,7 @@ define @intrinsic_vrsub_vi_nxv4i32_nxv4i32_i32( @llvm.riscv.vrsub.nxv4i32.i32( + undef, %0, i32 9, i32 %1) @@ -1559,6 +1619,7 @@ define @intrinsic_vrsub_vi_nxv8i32_nxv8i32_i32( @llvm.riscv.vrsub.nxv8i32.i32( + undef, %0, i32 9, i32 %1) @@ -1591,6 +1652,7 @@ define @intrinsic_vrsub_vi_nxv16i32_nxv16i32_i32( @llvm.riscv.vrsub.nxv16i32.i32( + undef, %0, i32 9, i32 %1) @@ -1623,6 +1685,7 @@ define @intrinsic_vrsub_vi_nxv1i64_nxv1i64_i64( @llvm.riscv.vrsub.nxv1i64.i64( + undef, %0, i64 9, i32 %1) @@ -1655,6 +1718,7 
@@ define @intrinsic_vrsub_vi_nxv2i64_nxv2i64_i64( @llvm.riscv.vrsub.nxv2i64.i64( + undef, %0, i64 9, i32 %1) @@ -1687,6 +1751,7 @@ define @intrinsic_vrsub_vi_nxv4i64_nxv4i64_i64( @llvm.riscv.vrsub.nxv4i64.i64( + undef, %0, i64 9, i32 %1) @@ -1719,6 +1784,7 @@ define @intrinsic_vrsub_vi_nxv8i64_nxv8i64_i64( @llvm.riscv.vrsub.nxv8i64.i64( + undef, %0, i64 9, i32 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vrsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrsub-rv64.ll index fe930ea..372a7cb 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vrsub-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrsub-rv64.ll @@ -3,6 +3,7 @@ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vrsub.nxv1i8.i8( , + , i8, i64); @@ -14,6 +15,7 @@ define @intrinsic_vrsub_vx_nxv1i8_nxv1i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrsub.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -48,6 +50,7 @@ entry: declare @llvm.riscv.vrsub.nxv2i8.i8( , + , i8, i64); @@ -59,6 +62,7 @@ define @intrinsic_vrsub_vx_nxv2i8_nxv2i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrsub.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -93,6 +97,7 @@ entry: declare @llvm.riscv.vrsub.nxv4i8.i8( , + , i8, i64); @@ -104,6 +109,7 @@ define @intrinsic_vrsub_vx_nxv4i8_nxv4i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrsub.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -138,6 +144,7 @@ entry: declare @llvm.riscv.vrsub.nxv8i8.i8( , + , i8, i64); @@ -149,6 +156,7 @@ define @intrinsic_vrsub_vx_nxv8i8_nxv8i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrsub.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -183,6 +191,7 @@ entry: declare @llvm.riscv.vrsub.nxv16i8.i8( , + , i8, i64); @@ -194,6 +203,7 @@ define @intrinsic_vrsub_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vrsub.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -228,6 +238,7 @@ entry: declare @llvm.riscv.vrsub.nxv32i8.i8( , + , i8, i64); @@ -239,6 +250,7 @@ define @intrinsic_vrsub_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vrsub.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -273,6 +285,7 @@ entry: declare @llvm.riscv.vrsub.nxv64i8.i8( , + , i8, i64); @@ -284,6 +297,7 @@ define @intrinsic_vrsub_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vrsub.nxv64i8.i8( + undef, %0, i8 %1, i64 %2) @@ -318,6 +332,7 @@ entry: declare @llvm.riscv.vrsub.nxv1i16.i16( , + , i16, i64); @@ -329,6 +344,7 @@ define @intrinsic_vrsub_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vrsub.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -363,6 +379,7 @@ entry: declare @llvm.riscv.vrsub.nxv2i16.i16( , + , i16, i64); @@ -374,6 +391,7 @@ define @intrinsic_vrsub_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vrsub.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -408,6 +426,7 @@ entry: declare @llvm.riscv.vrsub.nxv4i16.i16( , + , i16, i64); @@ -419,6 +438,7 @@ define @intrinsic_vrsub_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vrsub.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -453,6 +473,7 @@ entry: declare @llvm.riscv.vrsub.nxv8i16.i16( , + , i16, i64); @@ -464,6 +485,7 @@ define @intrinsic_vrsub_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vrsub.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -498,6 +520,7 @@ entry: declare @llvm.riscv.vrsub.nxv16i16.i16( , + , i16, i64); @@ -509,6 +532,7 @@ define @intrinsic_vrsub_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vrsub.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -543,6 +567,7 @@ entry: declare @llvm.riscv.vrsub.nxv32i16.i16( , + , i16, i64); @@ -554,6 +579,7 @@ define @intrinsic_vrsub_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vrsub.nxv32i16.i16( + undef, %0, i16 %1, i64 %2) @@ -588,6 +614,7 @@ entry: declare @llvm.riscv.vrsub.nxv1i32.i32( , + , i32, i64); @@ -599,6 +626,7 @@ define 
@intrinsic_vrsub_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vrsub.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -633,6 +661,7 @@ entry: declare @llvm.riscv.vrsub.nxv2i32.i32( , + , i32, i64); @@ -644,6 +673,7 @@ define @intrinsic_vrsub_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vrsub.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -678,6 +708,7 @@ entry: declare @llvm.riscv.vrsub.nxv4i32.i32( , + , i32, i64); @@ -689,6 +720,7 @@ define @intrinsic_vrsub_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vrsub.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -723,6 +755,7 @@ entry: declare @llvm.riscv.vrsub.nxv8i32.i32( , + , i32, i64); @@ -734,6 +767,7 @@ define @intrinsic_vrsub_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vrsub.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) @@ -768,6 +802,7 @@ entry: declare @llvm.riscv.vrsub.nxv16i32.i32( , + , i32, i64); @@ -779,6 +814,7 @@ define @intrinsic_vrsub_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vrsub.nxv16i32.i32( + undef, %0, i32 %1, i64 %2) @@ -813,6 +849,7 @@ entry: declare @llvm.riscv.vrsub.nxv1i64.i64( , + , i64, i64); @@ -824,6 +861,7 @@ define @intrinsic_vrsub_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vrsub.nxv1i64.i64( + undef, %0, i64 %1, i64 %2) @@ -858,6 +896,7 @@ entry: declare @llvm.riscv.vrsub.nxv2i64.i64( , + , i64, i64); @@ -869,6 +908,7 @@ define @intrinsic_vrsub_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vrsub.nxv2i64.i64( + undef, %0, i64 %1, i64 %2) @@ -903,6 +943,7 @@ entry: declare @llvm.riscv.vrsub.nxv4i64.i64( , + , i64, i64); @@ -914,6 +955,7 @@ define @intrinsic_vrsub_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vrsub.nxv4i64.i64( + undef, %0, i64 %1, i64 %2) @@ -948,6 +990,7 @@ entry: declare @llvm.riscv.vrsub.nxv8i64.i64( , + , i64, i64); @@ -959,6 +1002,7 @@ define @intrinsic_vrsub_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vrsub.nxv8i64.i64( + undef, %0, i64 %1, i64 %2) @@ -999,6 +1043,7 @@ define @intrinsic_vrsub_vi_nxv1i8_nxv1i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrsub.nxv1i8.i8( + undef, %0, i8 9, i64 %1) @@ -1031,6 +1076,7 @@ define @intrinsic_vrsub_vi_nxv2i8_nxv2i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrsub.nxv2i8.i8( + undef, %0, i8 9, i64 %1) @@ -1063,6 +1109,7 @@ define @intrinsic_vrsub_vi_nxv4i8_nxv4i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrsub.nxv4i8.i8( + undef, %0, i8 9, i64 %1) @@ -1095,6 +1142,7 @@ define @intrinsic_vrsub_vi_nxv8i8_nxv8i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrsub.nxv8i8.i8( + undef, %0, i8 9, i64 %1) @@ -1127,6 +1175,7 @@ define @intrinsic_vrsub_vi_nxv16i8_nxv16i8_i8( @llvm.riscv.vrsub.nxv16i8.i8( + undef, %0, i8 9, i64 %1) @@ -1159,6 +1208,7 @@ define @intrinsic_vrsub_vi_nxv32i8_nxv32i8_i8( @llvm.riscv.vrsub.nxv32i8.i8( + undef, %0, i8 9, i64 %1) @@ -1191,6 +1241,7 @@ define @intrinsic_vrsub_vi_nxv64i8_nxv64i8_i8( @llvm.riscv.vrsub.nxv64i8.i8( + undef, %0, i8 9, i64 %1) @@ -1223,6 +1274,7 @@ define @intrinsic_vrsub_vi_nxv1i16_nxv1i16_i16( @llvm.riscv.vrsub.nxv1i16.i16( + undef, %0, i16 9, i64 %1) @@ -1255,6 +1307,7 @@ define @intrinsic_vrsub_vi_nxv2i16_nxv2i16_i16( @llvm.riscv.vrsub.nxv2i16.i16( + undef, %0, i16 9, i64 %1) @@ -1287,6 +1340,7 @@ define @intrinsic_vrsub_vi_nxv4i16_nxv4i16_i16( @llvm.riscv.vrsub.nxv4i16.i16( + undef, %0, i16 9, i64 %1) @@ -1319,6 +1373,7 @@ define @intrinsic_vrsub_vi_nxv8i16_nxv8i16_i16( @llvm.riscv.vrsub.nxv8i16.i16( + undef, %0, i16 9, i64 %1) @@ -1351,6 +1406,7 @@ define @intrinsic_vrsub_vi_nxv16i16_nxv16i16_i16( @llvm.riscv.vrsub.nxv16i16.i16( + undef, %0, i16 9, i64 %1) @@ -1383,6 +1439,7 @@ define @intrinsic_vrsub_vi_nxv32i16_nxv32i16_i16( @llvm.riscv.vrsub.nxv32i16.i16( 
+ undef, %0, i16 9, i64 %1) @@ -1415,6 +1472,7 @@ define @intrinsic_vrsub_vi_nxv1i32_nxv1i32_i32( @llvm.riscv.vrsub.nxv1i32.i32( + undef, %0, i32 9, i64 %1) @@ -1447,6 +1505,7 @@ define @intrinsic_vrsub_vi_nxv2i32_nxv2i32_i32( @llvm.riscv.vrsub.nxv2i32.i32( + undef, %0, i32 9, i64 %1) @@ -1479,6 +1538,7 @@ define @intrinsic_vrsub_vi_nxv4i32_nxv4i32_i32( @llvm.riscv.vrsub.nxv4i32.i32( + undef, %0, i32 9, i64 %1) @@ -1511,6 +1571,7 @@ define @intrinsic_vrsub_vi_nxv8i32_nxv8i32_i32( @llvm.riscv.vrsub.nxv8i32.i32( + undef, %0, i32 9, i64 %1) @@ -1543,6 +1604,7 @@ define @intrinsic_vrsub_vi_nxv16i32_nxv16i32_i32( @llvm.riscv.vrsub.nxv16i32.i32( + undef, %0, i32 9, i64 %1) @@ -1575,6 +1637,7 @@ define @intrinsic_vrsub_vi_nxv1i64_nxv1i64_i64( @llvm.riscv.vrsub.nxv1i64.i64( + undef, %0, i64 9, i64 %1) @@ -1607,6 +1670,7 @@ define @intrinsic_vrsub_vi_nxv2i64_nxv2i64_i64( @llvm.riscv.vrsub.nxv2i64.i64( + undef, %0, i64 9, i64 %1) @@ -1639,6 +1703,7 @@ define @intrinsic_vrsub_vi_nxv4i64_nxv4i64_i64( @llvm.riscv.vrsub.nxv4i64.i64( + undef, %0, i64 9, i64 %1) @@ -1671,6 +1736,7 @@ define @intrinsic_vrsub_vi_nxv8i64_nxv8i64_i64( @llvm.riscv.vrsub.nxv8i64.i64( + undef, %0, i64 9, i64 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll index 968fbf1..2961c0d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll @@ -4,6 +4,7 @@ declare @llvm.riscv.vsadd.nxv1i8.nxv1i8( , , + , i32); define @intrinsic_vsadd_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { @@ -14,6 +15,7 @@ define @intrinsic_vsadd_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vsadd.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -49,6 +51,7 @@ entry: declare @llvm.riscv.vsadd.nxv2i8.nxv2i8( , , + , i32); define @intrinsic_vsadd_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { @@ -59,6 +62,7 @@ define @intrinsic_vsadd_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vsadd.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -94,6 +98,7 @@ entry: declare @llvm.riscv.vsadd.nxv4i8.nxv4i8( , , + , i32); define @intrinsic_vsadd_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { @@ -104,6 +109,7 @@ define @intrinsic_vsadd_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vsadd.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -139,6 +145,7 @@ entry: declare @llvm.riscv.vsadd.nxv8i8.nxv8i8( , , + , i32); define @intrinsic_vsadd_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { @@ -149,6 +156,7 @@ define @intrinsic_vsadd_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vsadd.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -184,6 +192,7 @@ entry: declare @llvm.riscv.vsadd.nxv16i8.nxv16i8( , , + , i32); define @intrinsic_vsadd_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { @@ -194,6 +203,7 @@ define @intrinsic_vsadd_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vsadd.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -229,6 +239,7 @@ entry: declare @llvm.riscv.vsadd.nxv32i8.nxv32i8( , , + , i32); define @intrinsic_vsadd_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { @@ -239,6 +250,7 @@ define @intrinsic_vsadd_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vsadd.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -274,6 +286,7 @@ entry: declare @llvm.riscv.vsadd.nxv64i8.nxv64i8( , , + , i32); define @intrinsic_vsadd_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { @@ -284,6 +297,7 @@ define @intrinsic_vsadd_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vsadd.nxv64i8.nxv64i8( + undef, %0, %1, i32 %2) @@ -320,6 +334,7 @@ entry: declare @llvm.riscv.vsadd.nxv1i16.nxv1i16( , , + , i32); define 
@intrinsic_vsadd_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { @@ -330,6 +345,7 @@ define @intrinsic_vsadd_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vsadd.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -365,6 +381,7 @@ entry: declare @llvm.riscv.vsadd.nxv2i16.nxv2i16( , , + , i32); define @intrinsic_vsadd_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { @@ -375,6 +392,7 @@ define @intrinsic_vsadd_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vsadd.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -410,6 +428,7 @@ entry: declare @llvm.riscv.vsadd.nxv4i16.nxv4i16( , , + , i32); define @intrinsic_vsadd_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { @@ -420,6 +439,7 @@ define @intrinsic_vsadd_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vsadd.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -455,6 +475,7 @@ entry: declare @llvm.riscv.vsadd.nxv8i16.nxv8i16( , , + , i32); define @intrinsic_vsadd_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { @@ -465,6 +486,7 @@ define @intrinsic_vsadd_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vsadd.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vsadd.nxv16i16.nxv16i16( , , + , i32); define @intrinsic_vsadd_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { @@ -510,6 +533,7 @@ define @intrinsic_vsadd_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vsadd.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vsadd.nxv32i16.nxv32i16( , , + , i32); define @intrinsic_vsadd_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { @@ -555,6 +580,7 @@ define @intrinsic_vsadd_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vsadd.nxv32i16.nxv32i16( + undef, %0, %1, i32 %2) @@ -591,6 +617,7 @@ entry: declare @llvm.riscv.vsadd.nxv1i32.nxv1i32( , , + , i32); define @intrinsic_vsadd_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { @@ -601,6 +628,7 @@ define @intrinsic_vsadd_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vsadd.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -636,6 +664,7 @@ entry: declare @llvm.riscv.vsadd.nxv2i32.nxv2i32( , , + , i32); define @intrinsic_vsadd_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { @@ -646,6 +675,7 @@ define @intrinsic_vsadd_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vsadd.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vsadd.nxv4i32.nxv4i32( , , + , i32); define @intrinsic_vsadd_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { @@ -691,6 +722,7 @@ define @intrinsic_vsadd_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vsadd.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vsadd.nxv8i32.nxv8i32( , , + , i32); define @intrinsic_vsadd_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { @@ -736,6 +769,7 @@ define @intrinsic_vsadd_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vsadd.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vsadd.nxv16i32.nxv16i32( , , + , i32); define @intrinsic_vsadd_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { @@ -781,6 +816,7 @@ define @intrinsic_vsadd_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vsadd.nxv16i32.nxv16i32( + undef, %0, %1, i32 %2) @@ -817,6 +853,7 @@ entry: declare @llvm.riscv.vsadd.nxv1i64.nxv1i64( , , + , i32); define @intrinsic_vsadd_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { @@ -827,6 +864,7 @@ define @intrinsic_vsadd_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vsadd.nxv1i64.nxv1i64( + undef, %0, %1, i32 %2) @@ -862,6 +900,7 @@ entry: declare @llvm.riscv.vsadd.nxv2i64.nxv2i64( , , + 
, i32); define @intrinsic_vsadd_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { @@ -872,6 +911,7 @@ define @intrinsic_vsadd_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vsadd.nxv2i64.nxv2i64( + undef, %0, %1, i32 %2) @@ -907,6 +947,7 @@ entry: declare @llvm.riscv.vsadd.nxv4i64.nxv4i64( , , + , i32); define @intrinsic_vsadd_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { @@ -917,6 +958,7 @@ define @intrinsic_vsadd_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vsadd.nxv4i64.nxv4i64( + undef, %0, %1, i32 %2) @@ -952,6 +994,7 @@ entry: declare @llvm.riscv.vsadd.nxv8i64.nxv8i64( , , + , i32); define @intrinsic_vsadd_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { @@ -962,6 +1005,7 @@ define @intrinsic_vsadd_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vsadd.nxv8i64.nxv8i64( + undef, %0, %1, i32 %2) @@ -997,6 +1041,7 @@ entry: declare @llvm.riscv.vsadd.nxv1i8.i8( , + , i8, i32); @@ -1008,6 +1053,7 @@ define @intrinsic_vsadd_vx_nxv1i8_nxv1i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsadd.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1042,6 +1088,7 @@ entry: declare @llvm.riscv.vsadd.nxv2i8.i8( , + , i8, i32); @@ -1053,6 +1100,7 @@ define @intrinsic_vsadd_vx_nxv2i8_nxv2i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsadd.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1087,6 +1135,7 @@ entry: declare @llvm.riscv.vsadd.nxv4i8.i8( , + , i8, i32); @@ -1098,6 +1147,7 @@ define @intrinsic_vsadd_vx_nxv4i8_nxv4i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsadd.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1132,6 +1182,7 @@ entry: declare @llvm.riscv.vsadd.nxv8i8.i8( , + , i8, i32); @@ -1143,6 +1194,7 @@ define @intrinsic_vsadd_vx_nxv8i8_nxv8i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsadd.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1177,6 +1229,7 @@ entry: declare @llvm.riscv.vsadd.nxv16i8.i8( , + , i8, i32); @@ -1188,6 +1241,7 @@ define @intrinsic_vsadd_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vsadd.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1222,6 +1276,7 @@ entry: declare @llvm.riscv.vsadd.nxv32i8.i8( , + , i8, i32); @@ -1233,6 +1288,7 @@ define @intrinsic_vsadd_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vsadd.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1267,6 +1323,7 @@ entry: declare @llvm.riscv.vsadd.nxv64i8.i8( , + , i8, i32); @@ -1278,6 +1335,7 @@ define @intrinsic_vsadd_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vsadd.nxv64i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1312,6 +1370,7 @@ entry: declare @llvm.riscv.vsadd.nxv1i16.i16( , + , i16, i32); @@ -1323,6 +1382,7 @@ define @intrinsic_vsadd_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vsadd.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1357,6 +1417,7 @@ entry: declare @llvm.riscv.vsadd.nxv2i16.i16( , + , i16, i32); @@ -1368,6 +1429,7 @@ define @intrinsic_vsadd_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vsadd.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1402,6 +1464,7 @@ entry: declare @llvm.riscv.vsadd.nxv4i16.i16( , + , i16, i32); @@ -1413,6 +1476,7 @@ define @intrinsic_vsadd_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vsadd.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1447,6 +1511,7 @@ entry: declare @llvm.riscv.vsadd.nxv8i16.i16( , + , i16, i32); @@ -1458,6 +1523,7 @@ define @intrinsic_vsadd_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vsadd.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1492,6 +1558,7 @@ entry: declare @llvm.riscv.vsadd.nxv16i16.i16( , + , i16, i32); @@ -1503,6 +1570,7 @@ define @intrinsic_vsadd_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vsadd.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1537,6 +1605,7 @@ entry: declare @llvm.riscv.vsadd.nxv32i16.i16( , + 
, i16, i32); @@ -1548,6 +1617,7 @@ define @intrinsic_vsadd_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vsadd.nxv32i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1582,6 +1652,7 @@ entry: declare @llvm.riscv.vsadd.nxv1i32.i32( , + , i32, i32); @@ -1593,6 +1664,7 @@ define @intrinsic_vsadd_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vsadd.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1627,6 +1699,7 @@ entry: declare @llvm.riscv.vsadd.nxv2i32.i32( , + , i32, i32); @@ -1638,6 +1711,7 @@ define @intrinsic_vsadd_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vsadd.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1672,6 +1746,7 @@ entry: declare @llvm.riscv.vsadd.nxv4i32.i32( , + , i32, i32); @@ -1683,6 +1758,7 @@ define @intrinsic_vsadd_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vsadd.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1717,6 +1793,7 @@ entry: declare @llvm.riscv.vsadd.nxv8i32.i32( , + , i32, i32); @@ -1728,6 +1805,7 @@ define @intrinsic_vsadd_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vsadd.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1762,6 +1840,7 @@ entry: declare @llvm.riscv.vsadd.nxv16i32.i32( , + , i32, i32); @@ -1773,6 +1852,7 @@ define @intrinsic_vsadd_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vsadd.nxv16i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1807,6 +1887,7 @@ entry: declare @llvm.riscv.vsadd.nxv1i64.i64( , + , i64, i32); @@ -1824,6 +1905,7 @@ define @intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vsadd.nxv1i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1864,6 +1946,7 @@ entry: declare @llvm.riscv.vsadd.nxv2i64.i64( , + , i64, i32); @@ -1881,6 +1964,7 @@ define @intrinsic_vsadd_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vsadd.nxv2i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1921,6 +2005,7 @@ entry: declare @llvm.riscv.vsadd.nxv4i64.i64( , + , i64, i32); @@ -1938,6 +2023,7 @@ define @intrinsic_vsadd_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vsadd.nxv4i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1978,6 +2064,7 @@ entry: declare @llvm.riscv.vsadd.nxv8i64.i64( , + , i64, i32); @@ -1995,6 +2082,7 @@ define @intrinsic_vsadd_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vsadd.nxv8i64.i64( + undef, %0, i64 %1, i32 %2) @@ -2041,6 +2129,7 @@ define @intrinsic_vsadd_vi_nxv1i8_nxv1i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsadd.nxv1i8.i8( + undef, %0, i8 9, i32 %1) @@ -2073,6 +2162,7 @@ define @intrinsic_vsadd_vi_nxv2i8_nxv2i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsadd.nxv2i8.i8( + undef, %0, i8 9, i32 %1) @@ -2105,6 +2195,7 @@ define @intrinsic_vsadd_vi_nxv4i8_nxv4i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsadd.nxv4i8.i8( + undef, %0, i8 9, i32 %1) @@ -2137,6 +2228,7 @@ define @intrinsic_vsadd_vi_nxv8i8_nxv8i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsadd.nxv8i8.i8( + undef, %0, i8 9, i32 %1) @@ -2169,6 +2261,7 @@ define @intrinsic_vsadd_vi_nxv16i8_nxv16i8_i8( @llvm.riscv.vsadd.nxv16i8.i8( + undef, %0, i8 9, i32 %1) @@ -2201,6 +2294,7 @@ define @intrinsic_vsadd_vi_nxv32i8_nxv32i8_i8( @llvm.riscv.vsadd.nxv32i8.i8( + undef, %0, i8 9, i32 %1) @@ -2233,6 +2327,7 @@ define @intrinsic_vsadd_vi_nxv64i8_nxv64i8_i8( @llvm.riscv.vsadd.nxv64i8.i8( + undef, %0, i8 9, i32 %1) @@ -2265,6 +2360,7 @@ define @intrinsic_vsadd_vi_nxv1i16_nxv1i16_i16( @llvm.riscv.vsadd.nxv1i16.i16( + undef, %0, i16 9, i32 %1) @@ -2297,6 +2393,7 @@ define @intrinsic_vsadd_vi_nxv2i16_nxv2i16_i16( @llvm.riscv.vsadd.nxv2i16.i16( + undef, %0, i16 9, i32 %1) @@ -2329,6 +2426,7 @@ define @intrinsic_vsadd_vi_nxv4i16_nxv4i16_i16( @llvm.riscv.vsadd.nxv4i16.i16( + undef, %0, i16 9, i32 %1) @@ -2361,6 +2459,7 @@ define @intrinsic_vsadd_vi_nxv8i16_nxv8i16_i16( 
@llvm.riscv.vsadd.nxv8i16.i16( + undef, %0, i16 9, i32 %1) @@ -2393,6 +2492,7 @@ define @intrinsic_vsadd_vi_nxv16i16_nxv16i16_i16( @llvm.riscv.vsadd.nxv16i16.i16( + undef, %0, i16 9, i32 %1) @@ -2425,6 +2525,7 @@ define @intrinsic_vsadd_vi_nxv32i16_nxv32i16_i16( @llvm.riscv.vsadd.nxv32i16.i16( + undef, %0, i16 9, i32 %1) @@ -2457,6 +2558,7 @@ define @intrinsic_vsadd_vi_nxv1i32_nxv1i32_i32( @llvm.riscv.vsadd.nxv1i32.i32( + undef, %0, i32 9, i32 %1) @@ -2489,6 +2591,7 @@ define @intrinsic_vsadd_vi_nxv2i32_nxv2i32_i32( @llvm.riscv.vsadd.nxv2i32.i32( + undef, %0, i32 9, i32 %1) @@ -2521,6 +2624,7 @@ define @intrinsic_vsadd_vi_nxv4i32_nxv4i32_i32( @llvm.riscv.vsadd.nxv4i32.i32( + undef, %0, i32 9, i32 %1) @@ -2553,6 +2657,7 @@ define @intrinsic_vsadd_vi_nxv8i32_nxv8i32_i32( @llvm.riscv.vsadd.nxv8i32.i32( + undef, %0, i32 9, i32 %1) @@ -2585,6 +2690,7 @@ define @intrinsic_vsadd_vi_nxv16i32_nxv16i32_i32( @llvm.riscv.vsadd.nxv16i32.i32( + undef, %0, i32 9, i32 %1) @@ -2617,6 +2723,7 @@ define @intrinsic_vsadd_vi_nxv1i64_nxv1i64_i64( @llvm.riscv.vsadd.nxv1i64.i64( + undef, %0, i64 9, i32 %1) @@ -2649,6 +2756,7 @@ define @intrinsic_vsadd_vi_nxv2i64_nxv2i64_i64( @llvm.riscv.vsadd.nxv2i64.i64( + undef, %0, i64 9, i32 %1) @@ -2681,6 +2789,7 @@ define @intrinsic_vsadd_vi_nxv4i64_nxv4i64_i64( @llvm.riscv.vsadd.nxv4i64.i64( + undef, %0, i64 9, i32 %1) @@ -2713,6 +2822,7 @@ define @intrinsic_vsadd_vi_nxv8i64_nxv8i64_i64( @llvm.riscv.vsadd.nxv8i64.i64( + undef, %0, i64 9, i32 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll index b1cf6b22..4042be6 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll @@ -4,6 +4,7 @@ declare @llvm.riscv.vsadd.nxv1i8.nxv1i8( , , + , i64); define @intrinsic_vsadd_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { @@ -14,6 +15,7 @@ define @intrinsic_vsadd_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vsadd.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -49,6 +51,7 @@ entry: declare @llvm.riscv.vsadd.nxv2i8.nxv2i8( , , + , i64); define @intrinsic_vsadd_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { @@ -59,6 +62,7 @@ define @intrinsic_vsadd_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vsadd.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -94,6 +98,7 @@ entry: declare @llvm.riscv.vsadd.nxv4i8.nxv4i8( , , + , i64); define @intrinsic_vsadd_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { @@ -104,6 +109,7 @@ define @intrinsic_vsadd_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vsadd.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -139,6 +145,7 @@ entry: declare @llvm.riscv.vsadd.nxv8i8.nxv8i8( , , + , i64); define @intrinsic_vsadd_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { @@ -149,6 +156,7 @@ define @intrinsic_vsadd_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vsadd.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -184,6 +192,7 @@ entry: declare @llvm.riscv.vsadd.nxv16i8.nxv16i8( , , + , i64); define @intrinsic_vsadd_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { @@ -194,6 +203,7 @@ define @intrinsic_vsadd_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vsadd.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -229,6 +239,7 @@ entry: declare @llvm.riscv.vsadd.nxv32i8.nxv32i8( , , + , i64); define @intrinsic_vsadd_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { @@ -239,6 +250,7 @@ define @intrinsic_vsadd_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vsadd.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -274,6 +286,7 @@ entry: declare @llvm.riscv.vsadd.nxv64i8.nxv64i8( , , + , i64); define 
@intrinsic_vsadd_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { @@ -284,6 +297,7 @@ define @intrinsic_vsadd_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vsadd.nxv64i8.nxv64i8( + undef, %0, %1, i64 %2) @@ -320,6 +334,7 @@ entry: declare @llvm.riscv.vsadd.nxv1i16.nxv1i16( , , + , i64); define @intrinsic_vsadd_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { @@ -330,6 +345,7 @@ define @intrinsic_vsadd_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vsadd.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -365,6 +381,7 @@ entry: declare @llvm.riscv.vsadd.nxv2i16.nxv2i16( , , + , i64); define @intrinsic_vsadd_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { @@ -375,6 +392,7 @@ define @intrinsic_vsadd_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vsadd.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -410,6 +428,7 @@ entry: declare @llvm.riscv.vsadd.nxv4i16.nxv4i16( , , + , i64); define @intrinsic_vsadd_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { @@ -420,6 +439,7 @@ define @intrinsic_vsadd_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vsadd.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -455,6 +475,7 @@ entry: declare @llvm.riscv.vsadd.nxv8i16.nxv8i16( , , + , i64); define @intrinsic_vsadd_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { @@ -465,6 +486,7 @@ define @intrinsic_vsadd_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vsadd.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vsadd.nxv16i16.nxv16i16( , , + , i64); define @intrinsic_vsadd_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { @@ -510,6 +533,7 @@ define @intrinsic_vsadd_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vsadd.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vsadd.nxv32i16.nxv32i16( , , + , i64); define @intrinsic_vsadd_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { @@ -555,6 +580,7 @@ define @intrinsic_vsadd_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vsadd.nxv32i16.nxv32i16( + undef, %0, %1, i64 %2) @@ -591,6 +617,7 @@ entry: declare @llvm.riscv.vsadd.nxv1i32.nxv1i32( , , + , i64); define @intrinsic_vsadd_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { @@ -601,6 +628,7 @@ define @intrinsic_vsadd_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vsadd.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -636,6 +664,7 @@ entry: declare @llvm.riscv.vsadd.nxv2i32.nxv2i32( , , + , i64); define @intrinsic_vsadd_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { @@ -646,6 +675,7 @@ define @intrinsic_vsadd_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vsadd.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vsadd.nxv4i32.nxv4i32( , , + , i64); define @intrinsic_vsadd_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { @@ -691,6 +722,7 @@ define @intrinsic_vsadd_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vsadd.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vsadd.nxv8i32.nxv8i32( , , + , i64); define @intrinsic_vsadd_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { @@ -736,6 +769,7 @@ define @intrinsic_vsadd_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vsadd.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vsadd.nxv16i32.nxv16i32( , , + , i64); define @intrinsic_vsadd_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { @@ -781,6 +816,7 @@ define @intrinsic_vsadd_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vsadd.nxv16i32.nxv16i32( + undef, %0, %1, i64 %2) @@ -817,6 +853,7 @@ entry: declare @llvm.riscv.vsadd.nxv1i64.nxv1i64( , , + 
, i64); define @intrinsic_vsadd_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { @@ -827,6 +864,7 @@ define @intrinsic_vsadd_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vsadd.nxv1i64.nxv1i64( + undef, %0, %1, i64 %2) @@ -862,6 +900,7 @@ entry: declare @llvm.riscv.vsadd.nxv2i64.nxv2i64( , , + , i64); define @intrinsic_vsadd_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { @@ -872,6 +911,7 @@ define @intrinsic_vsadd_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vsadd.nxv2i64.nxv2i64( + undef, %0, %1, i64 %2) @@ -907,6 +947,7 @@ entry: declare @llvm.riscv.vsadd.nxv4i64.nxv4i64( , , + , i64); define @intrinsic_vsadd_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { @@ -917,6 +958,7 @@ define @intrinsic_vsadd_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vsadd.nxv4i64.nxv4i64( + undef, %0, %1, i64 %2) @@ -952,6 +994,7 @@ entry: declare @llvm.riscv.vsadd.nxv8i64.nxv8i64( , , + , i64); define @intrinsic_vsadd_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { @@ -962,6 +1005,7 @@ define @intrinsic_vsadd_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vsadd.nxv8i64.nxv8i64( + undef, %0, %1, i64 %2) @@ -997,6 +1041,7 @@ entry: declare @llvm.riscv.vsadd.nxv1i8.i8( , + , i8, i64); @@ -1008,6 +1053,7 @@ define @intrinsic_vsadd_vx_nxv1i8_nxv1i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsadd.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1042,6 +1088,7 @@ entry: declare @llvm.riscv.vsadd.nxv2i8.i8( , + , i8, i64); @@ -1053,6 +1100,7 @@ define @intrinsic_vsadd_vx_nxv2i8_nxv2i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsadd.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1087,6 +1135,7 @@ entry: declare @llvm.riscv.vsadd.nxv4i8.i8( , + , i8, i64); @@ -1098,6 +1147,7 @@ define @intrinsic_vsadd_vx_nxv4i8_nxv4i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsadd.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1132,6 +1182,7 @@ entry: declare @llvm.riscv.vsadd.nxv8i8.i8( , + , i8, i64); @@ -1143,6 +1194,7 @@ define @intrinsic_vsadd_vx_nxv8i8_nxv8i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsadd.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1177,6 +1229,7 @@ entry: declare @llvm.riscv.vsadd.nxv16i8.i8( , + , i8, i64); @@ -1188,6 +1241,7 @@ define @intrinsic_vsadd_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vsadd.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1222,6 +1276,7 @@ entry: declare @llvm.riscv.vsadd.nxv32i8.i8( , + , i8, i64); @@ -1233,6 +1288,7 @@ define @intrinsic_vsadd_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vsadd.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1267,6 +1323,7 @@ entry: declare @llvm.riscv.vsadd.nxv64i8.i8( , + , i8, i64); @@ -1278,6 +1335,7 @@ define @intrinsic_vsadd_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vsadd.nxv64i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1312,6 +1370,7 @@ entry: declare @llvm.riscv.vsadd.nxv1i16.i16( , + , i16, i64); @@ -1323,6 +1382,7 @@ define @intrinsic_vsadd_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vsadd.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1357,6 +1417,7 @@ entry: declare @llvm.riscv.vsadd.nxv2i16.i16( , + , i16, i64); @@ -1368,6 +1429,7 @@ define @intrinsic_vsadd_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vsadd.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1402,6 +1464,7 @@ entry: declare @llvm.riscv.vsadd.nxv4i16.i16( , + , i16, i64); @@ -1413,6 +1476,7 @@ define @intrinsic_vsadd_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vsadd.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1447,6 +1511,7 @@ entry: declare @llvm.riscv.vsadd.nxv8i16.i16( , + , i16, i64); @@ -1458,6 +1523,7 @@ define @intrinsic_vsadd_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vsadd.nxv8i16.i16( + undef, %0, i16 %1, i64 
%2) @@ -1492,6 +1558,7 @@ entry: declare @llvm.riscv.vsadd.nxv16i16.i16( , + , i16, i64); @@ -1503,6 +1570,7 @@ define @intrinsic_vsadd_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vsadd.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1537,6 +1605,7 @@ entry: declare @llvm.riscv.vsadd.nxv32i16.i16( , + , i16, i64); @@ -1548,6 +1617,7 @@ define @intrinsic_vsadd_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vsadd.nxv32i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1582,6 +1652,7 @@ entry: declare @llvm.riscv.vsadd.nxv1i32.i32( , + , i32, i64); @@ -1593,6 +1664,7 @@ define @intrinsic_vsadd_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vsadd.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1627,6 +1699,7 @@ entry: declare @llvm.riscv.vsadd.nxv2i32.i32( , + , i32, i64); @@ -1638,6 +1711,7 @@ define @intrinsic_vsadd_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vsadd.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1672,6 +1746,7 @@ entry: declare @llvm.riscv.vsadd.nxv4i32.i32( , + , i32, i64); @@ -1683,6 +1758,7 @@ define @intrinsic_vsadd_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vsadd.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1717,6 +1793,7 @@ entry: declare @llvm.riscv.vsadd.nxv8i32.i32( , + , i32, i64); @@ -1728,6 +1805,7 @@ define @intrinsic_vsadd_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vsadd.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1762,6 +1840,7 @@ entry: declare @llvm.riscv.vsadd.nxv16i32.i32( , + , i32, i64); @@ -1773,6 +1852,7 @@ define @intrinsic_vsadd_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vsadd.nxv16i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1807,6 +1887,7 @@ entry: declare @llvm.riscv.vsadd.nxv1i64.i64( , + , i64, i64); @@ -1818,6 +1899,7 @@ define @intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vsadd.nxv1i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1852,6 +1934,7 @@ entry: declare @llvm.riscv.vsadd.nxv2i64.i64( , + , i64, i64); @@ -1863,6 +1946,7 @@ define @intrinsic_vsadd_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vsadd.nxv2i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1897,6 +1981,7 @@ entry: declare @llvm.riscv.vsadd.nxv4i64.i64( , + , i64, i64); @@ -1908,6 +1993,7 @@ define @intrinsic_vsadd_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vsadd.nxv4i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1942,6 +2028,7 @@ entry: declare @llvm.riscv.vsadd.nxv8i64.i64( , + , i64, i64); @@ -1953,6 +2040,7 @@ define @intrinsic_vsadd_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vsadd.nxv8i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1993,6 +2081,7 @@ define @intrinsic_vsadd_vi_nxv1i8_nxv1i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsadd.nxv1i8.i8( + undef, %0, i8 9, i64 %1) @@ -2025,6 +2114,7 @@ define @intrinsic_vsadd_vi_nxv2i8_nxv2i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsadd.nxv2i8.i8( + undef, %0, i8 9, i64 %1) @@ -2057,6 +2147,7 @@ define @intrinsic_vsadd_vi_nxv4i8_nxv4i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsadd.nxv4i8.i8( + undef, %0, i8 9, i64 %1) @@ -2089,6 +2180,7 @@ define @intrinsic_vsadd_vi_nxv8i8_nxv8i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsadd.nxv8i8.i8( + undef, %0, i8 9, i64 %1) @@ -2121,6 +2213,7 @@ define @intrinsic_vsadd_vi_nxv16i8_nxv16i8_i8( @llvm.riscv.vsadd.nxv16i8.i8( + undef, %0, i8 9, i64 %1) @@ -2153,6 +2246,7 @@ define @intrinsic_vsadd_vi_nxv32i8_nxv32i8_i8( @llvm.riscv.vsadd.nxv32i8.i8( + undef, %0, i8 9, i64 %1) @@ -2185,6 +2279,7 @@ define @intrinsic_vsadd_vi_nxv64i8_nxv64i8_i8( @llvm.riscv.vsadd.nxv64i8.i8( + undef, %0, i8 9, i64 %1) @@ -2217,6 +2312,7 @@ define @intrinsic_vsadd_vi_nxv1i16_nxv1i16_i16( @llvm.riscv.vsadd.nxv1i16.i16( + undef, %0, i16 9, i64 %1) @@ -2249,6 +2345,7 @@ define 
@intrinsic_vsadd_vi_nxv2i16_nxv2i16_i16( @llvm.riscv.vsadd.nxv2i16.i16( + undef, %0, i16 9, i64 %1) @@ -2281,6 +2378,7 @@ define @intrinsic_vsadd_vi_nxv4i16_nxv4i16_i16( @llvm.riscv.vsadd.nxv4i16.i16( + undef, %0, i16 9, i64 %1) @@ -2313,6 +2411,7 @@ define @intrinsic_vsadd_vi_nxv8i16_nxv8i16_i16( @llvm.riscv.vsadd.nxv8i16.i16( + undef, %0, i16 9, i64 %1) @@ -2345,6 +2444,7 @@ define @intrinsic_vsadd_vi_nxv16i16_nxv16i16_i16( @llvm.riscv.vsadd.nxv16i16.i16( + undef, %0, i16 9, i64 %1) @@ -2377,6 +2477,7 @@ define @intrinsic_vsadd_vi_nxv32i16_nxv32i16_i16( @llvm.riscv.vsadd.nxv32i16.i16( + undef, %0, i16 9, i64 %1) @@ -2409,6 +2510,7 @@ define @intrinsic_vsadd_vi_nxv1i32_nxv1i32_i32( @llvm.riscv.vsadd.nxv1i32.i32( + undef, %0, i32 9, i64 %1) @@ -2441,6 +2543,7 @@ define @intrinsic_vsadd_vi_nxv2i32_nxv2i32_i32( @llvm.riscv.vsadd.nxv2i32.i32( + undef, %0, i32 9, i64 %1) @@ -2473,6 +2576,7 @@ define @intrinsic_vsadd_vi_nxv4i32_nxv4i32_i32( @llvm.riscv.vsadd.nxv4i32.i32( + undef, %0, i32 9, i64 %1) @@ -2505,6 +2609,7 @@ define @intrinsic_vsadd_vi_nxv8i32_nxv8i32_i32( @llvm.riscv.vsadd.nxv8i32.i32( + undef, %0, i32 9, i64 %1) @@ -2537,6 +2642,7 @@ define @intrinsic_vsadd_vi_nxv16i32_nxv16i32_i32( @llvm.riscv.vsadd.nxv16i32.i32( + undef, %0, i32 9, i64 %1) @@ -2569,6 +2675,7 @@ define @intrinsic_vsadd_vi_nxv1i64_nxv1i64_i64( @llvm.riscv.vsadd.nxv1i64.i64( + undef, %0, i64 9, i64 %1) @@ -2601,6 +2708,7 @@ define @intrinsic_vsadd_vi_nxv2i64_nxv2i64_i64( @llvm.riscv.vsadd.nxv2i64.i64( + undef, %0, i64 9, i64 %1) @@ -2633,6 +2741,7 @@ define @intrinsic_vsadd_vi_nxv4i64_nxv4i64_i64( @llvm.riscv.vsadd.nxv4i64.i64( + undef, %0, i64 9, i64 %1) @@ -2665,6 +2774,7 @@ define @intrinsic_vsadd_vi_nxv8i64_nxv8i64_i64( @llvm.riscv.vsadd.nxv8i64.i64( + undef, %0, i64 9, i64 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll index c57a590..9c8492f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll @@ -4,6 +4,7 @@ declare @llvm.riscv.vsaddu.nxv1i8.nxv1i8( , , + , i32); define @intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { @@ -14,6 +15,7 @@ define @intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vsaddu.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -49,6 +51,7 @@ entry: declare @llvm.riscv.vsaddu.nxv2i8.nxv2i8( , , + , i32); define @intrinsic_vsaddu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { @@ -59,6 +62,7 @@ define @intrinsic_vsaddu_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vsaddu.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -94,6 +98,7 @@ entry: declare @llvm.riscv.vsaddu.nxv4i8.nxv4i8( , , + , i32); define @intrinsic_vsaddu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { @@ -104,6 +109,7 @@ define @intrinsic_vsaddu_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vsaddu.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -139,6 +145,7 @@ entry: declare @llvm.riscv.vsaddu.nxv8i8.nxv8i8( , , + , i32); define @intrinsic_vsaddu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { @@ -149,6 +156,7 @@ define @intrinsic_vsaddu_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vsaddu.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -184,6 +192,7 @@ entry: declare @llvm.riscv.vsaddu.nxv16i8.nxv16i8( , , + , i32); define @intrinsic_vsaddu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { @@ -194,6 +203,7 @@ define @intrinsic_vsaddu_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vsaddu.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -229,6 +239,7 @@ entry: declare @llvm.riscv.vsaddu.nxv32i8.nxv32i8( , , + , i32); define 
@intrinsic_vsaddu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { @@ -239,6 +250,7 @@ define @intrinsic_vsaddu_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vsaddu.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -274,6 +286,7 @@ entry: declare @llvm.riscv.vsaddu.nxv64i8.nxv64i8( , , + , i32); define @intrinsic_vsaddu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { @@ -284,6 +297,7 @@ define @intrinsic_vsaddu_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vsaddu.nxv64i8.nxv64i8( + undef, %0, %1, i32 %2) @@ -320,6 +334,7 @@ entry: declare @llvm.riscv.vsaddu.nxv1i16.nxv1i16( , , + , i32); define @intrinsic_vsaddu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { @@ -330,6 +345,7 @@ define @intrinsic_vsaddu_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vsaddu.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -365,6 +381,7 @@ entry: declare @llvm.riscv.vsaddu.nxv2i16.nxv2i16( , , + , i32); define @intrinsic_vsaddu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { @@ -375,6 +392,7 @@ define @intrinsic_vsaddu_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vsaddu.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -410,6 +428,7 @@ entry: declare @llvm.riscv.vsaddu.nxv4i16.nxv4i16( , , + , i32); define @intrinsic_vsaddu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { @@ -420,6 +439,7 @@ define @intrinsic_vsaddu_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vsaddu.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -455,6 +475,7 @@ entry: declare @llvm.riscv.vsaddu.nxv8i16.nxv8i16( , , + , i32); define @intrinsic_vsaddu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { @@ -465,6 +486,7 @@ define @intrinsic_vsaddu_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vsaddu.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vsaddu.nxv16i16.nxv16i16( , , + , i32); define @intrinsic_vsaddu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { @@ -510,6 +533,7 @@ define @intrinsic_vsaddu_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vsaddu.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vsaddu.nxv32i16.nxv32i16( , , + , i32); define @intrinsic_vsaddu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { @@ -555,6 +580,7 @@ define @intrinsic_vsaddu_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vsaddu.nxv32i16.nxv32i16( + undef, %0, %1, i32 %2) @@ -591,6 +617,7 @@ entry: declare @llvm.riscv.vsaddu.nxv1i32.nxv1i32( , , + , i32); define @intrinsic_vsaddu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { @@ -601,6 +628,7 @@ define @intrinsic_vsaddu_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vsaddu.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -636,6 +664,7 @@ entry: declare @llvm.riscv.vsaddu.nxv2i32.nxv2i32( , , + , i32); define @intrinsic_vsaddu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { @@ -646,6 +675,7 @@ define @intrinsic_vsaddu_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vsaddu.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vsaddu.nxv4i32.nxv4i32( , , + , i32); define @intrinsic_vsaddu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { @@ -691,6 +722,7 @@ define @intrinsic_vsaddu_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vsaddu.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vsaddu.nxv8i32.nxv8i32( , , + , i32); define @intrinsic_vsaddu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { @@ -736,6 +769,7 @@ define @intrinsic_vsaddu_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vsaddu.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -771,6 +805,7 @@ entry: declare 
@llvm.riscv.vsaddu.nxv16i32.nxv16i32( , , + , i32); define @intrinsic_vsaddu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { @@ -781,6 +816,7 @@ define @intrinsic_vsaddu_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vsaddu.nxv16i32.nxv16i32( + undef, %0, %1, i32 %2) @@ -817,6 +853,7 @@ entry: declare @llvm.riscv.vsaddu.nxv1i64.nxv1i64( , , + , i32); define @intrinsic_vsaddu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { @@ -827,6 +864,7 @@ define @intrinsic_vsaddu_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vsaddu.nxv1i64.nxv1i64( + undef, %0, %1, i32 %2) @@ -862,6 +900,7 @@ entry: declare @llvm.riscv.vsaddu.nxv2i64.nxv2i64( , , + , i32); define @intrinsic_vsaddu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { @@ -872,6 +911,7 @@ define @intrinsic_vsaddu_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vsaddu.nxv2i64.nxv2i64( + undef, %0, %1, i32 %2) @@ -907,6 +947,7 @@ entry: declare @llvm.riscv.vsaddu.nxv4i64.nxv4i64( , , + , i32); define @intrinsic_vsaddu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { @@ -917,6 +958,7 @@ define @intrinsic_vsaddu_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vsaddu.nxv4i64.nxv4i64( + undef, %0, %1, i32 %2) @@ -952,6 +994,7 @@ entry: declare @llvm.riscv.vsaddu.nxv8i64.nxv8i64( , , + , i32); define @intrinsic_vsaddu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { @@ -962,6 +1005,7 @@ define @intrinsic_vsaddu_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vsaddu.nxv8i64.nxv8i64( + undef, %0, %1, i32 %2) @@ -997,6 +1041,7 @@ entry: declare @llvm.riscv.vsaddu.nxv1i8.i8( , + , i8, i32); @@ -1008,6 +1053,7 @@ define @intrinsic_vsaddu_vx_nxv1i8_nxv1i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsaddu.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1042,6 +1088,7 @@ entry: declare @llvm.riscv.vsaddu.nxv2i8.i8( , + , i8, i32); @@ -1053,6 +1100,7 @@ define @intrinsic_vsaddu_vx_nxv2i8_nxv2i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsaddu.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1087,6 +1135,7 @@ entry: declare @llvm.riscv.vsaddu.nxv4i8.i8( , + , i8, i32); @@ -1098,6 +1147,7 @@ define @intrinsic_vsaddu_vx_nxv4i8_nxv4i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsaddu.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1132,6 +1182,7 @@ entry: declare @llvm.riscv.vsaddu.nxv8i8.i8( , + , i8, i32); @@ -1143,6 +1194,7 @@ define @intrinsic_vsaddu_vx_nxv8i8_nxv8i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsaddu.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1177,6 +1229,7 @@ entry: declare @llvm.riscv.vsaddu.nxv16i8.i8( , + , i8, i32); @@ -1188,6 +1241,7 @@ define @intrinsic_vsaddu_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vsaddu.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1222,6 +1276,7 @@ entry: declare @llvm.riscv.vsaddu.nxv32i8.i8( , + , i8, i32); @@ -1233,6 +1288,7 @@ define @intrinsic_vsaddu_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vsaddu.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1267,6 +1323,7 @@ entry: declare @llvm.riscv.vsaddu.nxv64i8.i8( , + , i8, i32); @@ -1278,6 +1335,7 @@ define @intrinsic_vsaddu_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vsaddu.nxv64i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1312,6 +1370,7 @@ entry: declare @llvm.riscv.vsaddu.nxv1i16.i16( , + , i16, i32); @@ -1323,6 +1382,7 @@ define @intrinsic_vsaddu_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vsaddu.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1357,6 +1417,7 @@ entry: declare @llvm.riscv.vsaddu.nxv2i16.i16( , + , i16, i32); @@ -1368,6 +1429,7 @@ define @intrinsic_vsaddu_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vsaddu.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1402,6 +1464,7 @@ entry: 
declare @llvm.riscv.vsaddu.nxv4i16.i16( , + , i16, i32); @@ -1413,6 +1476,7 @@ define @intrinsic_vsaddu_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vsaddu.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1447,6 +1511,7 @@ entry: declare @llvm.riscv.vsaddu.nxv8i16.i16( , + , i16, i32); @@ -1458,6 +1523,7 @@ define @intrinsic_vsaddu_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vsaddu.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1492,6 +1558,7 @@ entry: declare @llvm.riscv.vsaddu.nxv16i16.i16( , + , i16, i32); @@ -1503,6 +1570,7 @@ define @intrinsic_vsaddu_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vsaddu.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1537,6 +1605,7 @@ entry: declare @llvm.riscv.vsaddu.nxv32i16.i16( , + , i16, i32); @@ -1548,6 +1617,7 @@ define @intrinsic_vsaddu_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vsaddu.nxv32i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1582,6 +1652,7 @@ entry: declare @llvm.riscv.vsaddu.nxv1i32.i32( , + , i32, i32); @@ -1593,6 +1664,7 @@ define @intrinsic_vsaddu_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vsaddu.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1627,6 +1699,7 @@ entry: declare @llvm.riscv.vsaddu.nxv2i32.i32( , + , i32, i32); @@ -1638,6 +1711,7 @@ define @intrinsic_vsaddu_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vsaddu.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1672,6 +1746,7 @@ entry: declare @llvm.riscv.vsaddu.nxv4i32.i32( , + , i32, i32); @@ -1683,6 +1758,7 @@ define @intrinsic_vsaddu_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vsaddu.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1717,6 +1793,7 @@ entry: declare @llvm.riscv.vsaddu.nxv8i32.i32( , + , i32, i32); @@ -1728,6 +1805,7 @@ define @intrinsic_vsaddu_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vsaddu.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1762,6 +1840,7 @@ entry: declare @llvm.riscv.vsaddu.nxv16i32.i32( , + , i32, i32); @@ -1773,6 +1852,7 @@ define @intrinsic_vsaddu_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vsaddu.nxv16i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1807,6 +1887,7 @@ entry: declare @llvm.riscv.vsaddu.nxv1i64.i64( , + , i64, i32); @@ -1824,6 +1905,7 @@ define @intrinsic_vsaddu_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vsaddu.nxv1i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1864,6 +1946,7 @@ entry: declare @llvm.riscv.vsaddu.nxv2i64.i64( , + , i64, i32); @@ -1881,6 +1964,7 @@ define @intrinsic_vsaddu_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vsaddu.nxv2i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1921,6 +2005,7 @@ entry: declare @llvm.riscv.vsaddu.nxv4i64.i64( , + , i64, i32); @@ -1938,6 +2023,7 @@ define @intrinsic_vsaddu_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vsaddu.nxv4i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1978,6 +2064,7 @@ entry: declare @llvm.riscv.vsaddu.nxv8i64.i64( , + , i64, i32); @@ -1995,6 +2082,7 @@ define @intrinsic_vsaddu_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vsaddu.nxv8i64.i64( + undef, %0, i64 %1, i32 %2) @@ -2041,6 +2129,7 @@ define @intrinsic_vsaddu_vi_nxv1i8_nxv1i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsaddu.nxv1i8.i8( + undef, %0, i8 9, i32 %1) @@ -2073,6 +2162,7 @@ define @intrinsic_vsaddu_vi_nxv2i8_nxv2i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsaddu.nxv2i8.i8( + undef, %0, i8 9, i32 %1) @@ -2105,6 +2195,7 @@ define @intrinsic_vsaddu_vi_nxv4i8_nxv4i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsaddu.nxv4i8.i8( + undef, %0, i8 9, i32 %1) @@ -2137,6 +2228,7 @@ define @intrinsic_vsaddu_vi_nxv8i8_nxv8i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsaddu.nxv8i8.i8( + undef, %0, i8 9, i32 %1) @@ -2169,6 +2261,7 @@ define @intrinsic_vsaddu_vi_nxv16i8_nxv16i8_i8( 
@llvm.riscv.vsaddu.nxv16i8.i8( + undef, %0, i8 9, i32 %1) @@ -2201,6 +2294,7 @@ define @intrinsic_vsaddu_vi_nxv32i8_nxv32i8_i8( @llvm.riscv.vsaddu.nxv32i8.i8( + undef, %0, i8 9, i32 %1) @@ -2233,6 +2327,7 @@ define @intrinsic_vsaddu_vi_nxv64i8_nxv64i8_i8( @llvm.riscv.vsaddu.nxv64i8.i8( + undef, %0, i8 9, i32 %1) @@ -2265,6 +2360,7 @@ define @intrinsic_vsaddu_vi_nxv1i16_nxv1i16_i16( @llvm.riscv.vsaddu.nxv1i16.i16( + undef, %0, i16 9, i32 %1) @@ -2297,6 +2393,7 @@ define @intrinsic_vsaddu_vi_nxv2i16_nxv2i16_i16( @llvm.riscv.vsaddu.nxv2i16.i16( + undef, %0, i16 9, i32 %1) @@ -2329,6 +2426,7 @@ define @intrinsic_vsaddu_vi_nxv4i16_nxv4i16_i16( @llvm.riscv.vsaddu.nxv4i16.i16( + undef, %0, i16 9, i32 %1) @@ -2361,6 +2459,7 @@ define @intrinsic_vsaddu_vi_nxv8i16_nxv8i16_i16( @llvm.riscv.vsaddu.nxv8i16.i16( + undef, %0, i16 9, i32 %1) @@ -2393,6 +2492,7 @@ define @intrinsic_vsaddu_vi_nxv16i16_nxv16i16_i16( @llvm.riscv.vsaddu.nxv16i16.i16( + undef, %0, i16 9, i32 %1) @@ -2425,6 +2525,7 @@ define @intrinsic_vsaddu_vi_nxv32i16_nxv32i16_i16( @llvm.riscv.vsaddu.nxv32i16.i16( + undef, %0, i16 9, i32 %1) @@ -2457,6 +2558,7 @@ define @intrinsic_vsaddu_vi_nxv1i32_nxv1i32_i32( @llvm.riscv.vsaddu.nxv1i32.i32( + undef, %0, i32 9, i32 %1) @@ -2489,6 +2591,7 @@ define @intrinsic_vsaddu_vi_nxv2i32_nxv2i32_i32( @llvm.riscv.vsaddu.nxv2i32.i32( + undef, %0, i32 9, i32 %1) @@ -2521,6 +2624,7 @@ define @intrinsic_vsaddu_vi_nxv4i32_nxv4i32_i32( @llvm.riscv.vsaddu.nxv4i32.i32( + undef, %0, i32 9, i32 %1) @@ -2553,6 +2657,7 @@ define @intrinsic_vsaddu_vi_nxv8i32_nxv8i32_i32( @llvm.riscv.vsaddu.nxv8i32.i32( + undef, %0, i32 9, i32 %1) @@ -2585,6 +2690,7 @@ define @intrinsic_vsaddu_vi_nxv16i32_nxv16i32_i32( @llvm.riscv.vsaddu.nxv16i32.i32( + undef, %0, i32 9, i32 %1) @@ -2617,6 +2723,7 @@ define @intrinsic_vsaddu_vi_nxv1i64_nxv1i64_i64( @llvm.riscv.vsaddu.nxv1i64.i64( + undef, %0, i64 9, i32 %1) @@ -2649,6 +2756,7 @@ define @intrinsic_vsaddu_vi_nxv2i64_nxv2i64_i64( @llvm.riscv.vsaddu.nxv2i64.i64( + undef, %0, i64 9, i32 %1) @@ -2681,6 +2789,7 @@ define @intrinsic_vsaddu_vi_nxv4i64_nxv4i64_i64( @llvm.riscv.vsaddu.nxv4i64.i64( + undef, %0, i64 9, i32 %1) @@ -2713,6 +2822,7 @@ define @intrinsic_vsaddu_vi_nxv8i64_nxv8i64_i64( @llvm.riscv.vsaddu.nxv8i64.i64( + undef, %0, i64 9, i32 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll index 991d230..10efea7 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll @@ -4,6 +4,7 @@ declare @llvm.riscv.vsaddu.nxv1i8.nxv1i8( , , + , i64); define @intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { @@ -14,6 +15,7 @@ define @intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vsaddu.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -49,6 +51,7 @@ entry: declare @llvm.riscv.vsaddu.nxv2i8.nxv2i8( , , + , i64); define @intrinsic_vsaddu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { @@ -59,6 +62,7 @@ define @intrinsic_vsaddu_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vsaddu.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -94,6 +98,7 @@ entry: declare @llvm.riscv.vsaddu.nxv4i8.nxv4i8( , , + , i64); define @intrinsic_vsaddu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { @@ -104,6 +109,7 @@ define @intrinsic_vsaddu_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vsaddu.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -139,6 +145,7 @@ entry: declare @llvm.riscv.vsaddu.nxv8i8.nxv8i8( , , + , i64); define @intrinsic_vsaddu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { @@ -149,6 +156,7 @@ define 
@intrinsic_vsaddu_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vsaddu.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -184,6 +192,7 @@ entry: declare @llvm.riscv.vsaddu.nxv16i8.nxv16i8( , , + , i64); define @intrinsic_vsaddu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { @@ -194,6 +203,7 @@ define @intrinsic_vsaddu_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vsaddu.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -229,6 +239,7 @@ entry: declare @llvm.riscv.vsaddu.nxv32i8.nxv32i8( , , + , i64); define @intrinsic_vsaddu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { @@ -239,6 +250,7 @@ define @intrinsic_vsaddu_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vsaddu.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -274,6 +286,7 @@ entry: declare @llvm.riscv.vsaddu.nxv64i8.nxv64i8( , , + , i64); define @intrinsic_vsaddu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { @@ -284,6 +297,7 @@ define @intrinsic_vsaddu_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vsaddu.nxv64i8.nxv64i8( + undef, %0, %1, i64 %2) @@ -320,6 +334,7 @@ entry: declare @llvm.riscv.vsaddu.nxv1i16.nxv1i16( , , + , i64); define @intrinsic_vsaddu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { @@ -330,6 +345,7 @@ define @intrinsic_vsaddu_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vsaddu.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -365,6 +381,7 @@ entry: declare @llvm.riscv.vsaddu.nxv2i16.nxv2i16( , , + , i64); define @intrinsic_vsaddu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { @@ -375,6 +392,7 @@ define @intrinsic_vsaddu_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vsaddu.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -410,6 +428,7 @@ entry: declare @llvm.riscv.vsaddu.nxv4i16.nxv4i16( , , + , i64); define @intrinsic_vsaddu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { @@ -420,6 +439,7 @@ define @intrinsic_vsaddu_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vsaddu.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -455,6 +475,7 @@ entry: declare @llvm.riscv.vsaddu.nxv8i16.nxv8i16( , , + , i64); define @intrinsic_vsaddu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { @@ -465,6 +486,7 @@ define @intrinsic_vsaddu_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vsaddu.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vsaddu.nxv16i16.nxv16i16( , , + , i64); define @intrinsic_vsaddu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { @@ -510,6 +533,7 @@ define @intrinsic_vsaddu_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vsaddu.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vsaddu.nxv32i16.nxv32i16( , , + , i64); define @intrinsic_vsaddu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { @@ -555,6 +580,7 @@ define @intrinsic_vsaddu_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vsaddu.nxv32i16.nxv32i16( + undef, %0, %1, i64 %2) @@ -591,6 +617,7 @@ entry: declare @llvm.riscv.vsaddu.nxv1i32.nxv1i32( , , + , i64); define @intrinsic_vsaddu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { @@ -601,6 +628,7 @@ define @intrinsic_vsaddu_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vsaddu.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -636,6 +664,7 @@ entry: declare @llvm.riscv.vsaddu.nxv2i32.nxv2i32( , , + , i64); define @intrinsic_vsaddu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { @@ -646,6 +675,7 @@ define @intrinsic_vsaddu_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vsaddu.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vsaddu.nxv4i32.nxv4i32( , , + , i64); define @intrinsic_vsaddu_vv_nxv4i32_nxv4i32_nxv4i32( %0, 
%1, i64 %2) nounwind { @@ -691,6 +722,7 @@ define @intrinsic_vsaddu_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vsaddu.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vsaddu.nxv8i32.nxv8i32( , , + , i64); define @intrinsic_vsaddu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { @@ -736,6 +769,7 @@ define @intrinsic_vsaddu_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vsaddu.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vsaddu.nxv16i32.nxv16i32( , , + , i64); define @intrinsic_vsaddu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { @@ -781,6 +816,7 @@ define @intrinsic_vsaddu_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vsaddu.nxv16i32.nxv16i32( + undef, %0, %1, i64 %2) @@ -817,6 +853,7 @@ entry: declare @llvm.riscv.vsaddu.nxv1i64.nxv1i64( , , + , i64); define @intrinsic_vsaddu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { @@ -827,6 +864,7 @@ define @intrinsic_vsaddu_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vsaddu.nxv1i64.nxv1i64( + undef, %0, %1, i64 %2) @@ -862,6 +900,7 @@ entry: declare @llvm.riscv.vsaddu.nxv2i64.nxv2i64( , , + , i64); define @intrinsic_vsaddu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { @@ -872,6 +911,7 @@ define @intrinsic_vsaddu_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vsaddu.nxv2i64.nxv2i64( + undef, %0, %1, i64 %2) @@ -907,6 +947,7 @@ entry: declare @llvm.riscv.vsaddu.nxv4i64.nxv4i64( , , + , i64); define @intrinsic_vsaddu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { @@ -917,6 +958,7 @@ define @intrinsic_vsaddu_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vsaddu.nxv4i64.nxv4i64( + undef, %0, %1, i64 %2) @@ -952,6 +994,7 @@ entry: declare @llvm.riscv.vsaddu.nxv8i64.nxv8i64( , , + , i64); define @intrinsic_vsaddu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { @@ -962,6 +1005,7 @@ define @intrinsic_vsaddu_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vsaddu.nxv8i64.nxv8i64( + undef, %0, %1, i64 %2) @@ -997,6 +1041,7 @@ entry: declare @llvm.riscv.vsaddu.nxv1i8.i8( , + , i8, i64); @@ -1008,6 +1053,7 @@ define @intrinsic_vsaddu_vx_nxv1i8_nxv1i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsaddu.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1042,6 +1088,7 @@ entry: declare @llvm.riscv.vsaddu.nxv2i8.i8( , + , i8, i64); @@ -1053,6 +1100,7 @@ define @intrinsic_vsaddu_vx_nxv2i8_nxv2i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsaddu.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1087,6 +1135,7 @@ entry: declare @llvm.riscv.vsaddu.nxv4i8.i8( , + , i8, i64); @@ -1098,6 +1147,7 @@ define @intrinsic_vsaddu_vx_nxv4i8_nxv4i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsaddu.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1132,6 +1182,7 @@ entry: declare @llvm.riscv.vsaddu.nxv8i8.i8( , + , i8, i64); @@ -1143,6 +1194,7 @@ define @intrinsic_vsaddu_vx_nxv8i8_nxv8i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsaddu.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1177,6 +1229,7 @@ entry: declare @llvm.riscv.vsaddu.nxv16i8.i8( , + , i8, i64); @@ -1188,6 +1241,7 @@ define @intrinsic_vsaddu_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vsaddu.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1222,6 +1276,7 @@ entry: declare @llvm.riscv.vsaddu.nxv32i8.i8( , + , i8, i64); @@ -1233,6 +1288,7 @@ define @intrinsic_vsaddu_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vsaddu.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1267,6 +1323,7 @@ entry: declare @llvm.riscv.vsaddu.nxv64i8.i8( , + , i8, i64); @@ -1278,6 +1335,7 @@ define @intrinsic_vsaddu_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vsaddu.nxv64i8.i8( + 
undef, %0, i8 %1, i64 %2) @@ -1312,6 +1370,7 @@ entry: declare @llvm.riscv.vsaddu.nxv1i16.i16( , + , i16, i64); @@ -1323,6 +1382,7 @@ define @intrinsic_vsaddu_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vsaddu.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1357,6 +1417,7 @@ entry: declare @llvm.riscv.vsaddu.nxv2i16.i16( , + , i16, i64); @@ -1368,6 +1429,7 @@ define @intrinsic_vsaddu_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vsaddu.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1402,6 +1464,7 @@ entry: declare @llvm.riscv.vsaddu.nxv4i16.i16( , + , i16, i64); @@ -1413,6 +1476,7 @@ define @intrinsic_vsaddu_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vsaddu.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1447,6 +1511,7 @@ entry: declare @llvm.riscv.vsaddu.nxv8i16.i16( , + , i16, i64); @@ -1458,6 +1523,7 @@ define @intrinsic_vsaddu_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vsaddu.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1492,6 +1558,7 @@ entry: declare @llvm.riscv.vsaddu.nxv16i16.i16( , + , i16, i64); @@ -1503,6 +1570,7 @@ define @intrinsic_vsaddu_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vsaddu.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1537,6 +1605,7 @@ entry: declare @llvm.riscv.vsaddu.nxv32i16.i16( , + , i16, i64); @@ -1548,6 +1617,7 @@ define @intrinsic_vsaddu_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vsaddu.nxv32i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1582,6 +1652,7 @@ entry: declare @llvm.riscv.vsaddu.nxv1i32.i32( , + , i32, i64); @@ -1593,6 +1664,7 @@ define @intrinsic_vsaddu_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vsaddu.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1627,6 +1699,7 @@ entry: declare @llvm.riscv.vsaddu.nxv2i32.i32( , + , i32, i64); @@ -1638,6 +1711,7 @@ define @intrinsic_vsaddu_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vsaddu.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1672,6 +1746,7 @@ entry: declare @llvm.riscv.vsaddu.nxv4i32.i32( , + , i32, i64); @@ -1683,6 +1758,7 @@ define @intrinsic_vsaddu_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vsaddu.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1717,6 +1793,7 @@ entry: declare @llvm.riscv.vsaddu.nxv8i32.i32( , + , i32, i64); @@ -1728,6 +1805,7 @@ define @intrinsic_vsaddu_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vsaddu.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1762,6 +1840,7 @@ entry: declare @llvm.riscv.vsaddu.nxv16i32.i32( , + , i32, i64); @@ -1773,6 +1852,7 @@ define @intrinsic_vsaddu_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vsaddu.nxv16i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1807,6 +1887,7 @@ entry: declare @llvm.riscv.vsaddu.nxv1i64.i64( , + , i64, i64); @@ -1818,6 +1899,7 @@ define @intrinsic_vsaddu_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vsaddu.nxv1i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1852,6 +1934,7 @@ entry: declare @llvm.riscv.vsaddu.nxv2i64.i64( , + , i64, i64); @@ -1863,6 +1946,7 @@ define @intrinsic_vsaddu_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vsaddu.nxv2i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1897,6 +1981,7 @@ entry: declare @llvm.riscv.vsaddu.nxv4i64.i64( , + , i64, i64); @@ -1908,6 +1993,7 @@ define @intrinsic_vsaddu_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vsaddu.nxv4i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1942,6 +2028,7 @@ entry: declare @llvm.riscv.vsaddu.nxv8i64.i64( , + , i64, i64); @@ -1953,6 +2040,7 @@ define @intrinsic_vsaddu_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vsaddu.nxv8i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1993,6 +2081,7 @@ define @intrinsic_vsaddu_vi_nxv1i8_nxv1i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsaddu.nxv1i8.i8( + undef, %0, i8 9, i64 %1) @@ -2025,6 +2114,7 @@ define @intrinsic_vsaddu_vi_nxv2i8_nxv2i8_i8( ; CHECK-NEXT: ret 
entry: %a = call @llvm.riscv.vsaddu.nxv2i8.i8( + undef, %0, i8 9, i64 %1) @@ -2057,6 +2147,7 @@ define @intrinsic_vsaddu_vi_nxv4i8_nxv4i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsaddu.nxv4i8.i8( + undef, %0, i8 9, i64 %1) @@ -2089,6 +2180,7 @@ define @intrinsic_vsaddu_vi_nxv8i8_nxv8i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsaddu.nxv8i8.i8( + undef, %0, i8 9, i64 %1) @@ -2121,6 +2213,7 @@ define @intrinsic_vsaddu_vi_nxv16i8_nxv16i8_i8( @llvm.riscv.vsaddu.nxv16i8.i8( + undef, %0, i8 9, i64 %1) @@ -2153,6 +2246,7 @@ define @intrinsic_vsaddu_vi_nxv32i8_nxv32i8_i8( @llvm.riscv.vsaddu.nxv32i8.i8( + undef, %0, i8 9, i64 %1) @@ -2185,6 +2279,7 @@ define @intrinsic_vsaddu_vi_nxv64i8_nxv64i8_i8( @llvm.riscv.vsaddu.nxv64i8.i8( + undef, %0, i8 9, i64 %1) @@ -2217,6 +2312,7 @@ define @intrinsic_vsaddu_vi_nxv1i16_nxv1i16_i16( @llvm.riscv.vsaddu.nxv1i16.i16( + undef, %0, i16 9, i64 %1) @@ -2249,6 +2345,7 @@ define @intrinsic_vsaddu_vi_nxv2i16_nxv2i16_i16( @llvm.riscv.vsaddu.nxv2i16.i16( + undef, %0, i16 9, i64 %1) @@ -2281,6 +2378,7 @@ define @intrinsic_vsaddu_vi_nxv4i16_nxv4i16_i16( @llvm.riscv.vsaddu.nxv4i16.i16( + undef, %0, i16 9, i64 %1) @@ -2313,6 +2411,7 @@ define @intrinsic_vsaddu_vi_nxv8i16_nxv8i16_i16( @llvm.riscv.vsaddu.nxv8i16.i16( + undef, %0, i16 9, i64 %1) @@ -2345,6 +2444,7 @@ define @intrinsic_vsaddu_vi_nxv16i16_nxv16i16_i16( @llvm.riscv.vsaddu.nxv16i16.i16( + undef, %0, i16 9, i64 %1) @@ -2377,6 +2477,7 @@ define @intrinsic_vsaddu_vi_nxv32i16_nxv32i16_i16( @llvm.riscv.vsaddu.nxv32i16.i16( + undef, %0, i16 9, i64 %1) @@ -2409,6 +2510,7 @@ define @intrinsic_vsaddu_vi_nxv1i32_nxv1i32_i32( @llvm.riscv.vsaddu.nxv1i32.i32( + undef, %0, i32 9, i64 %1) @@ -2441,6 +2543,7 @@ define @intrinsic_vsaddu_vi_nxv2i32_nxv2i32_i32( @llvm.riscv.vsaddu.nxv2i32.i32( + undef, %0, i32 9, i64 %1) @@ -2473,6 +2576,7 @@ define @intrinsic_vsaddu_vi_nxv4i32_nxv4i32_i32( @llvm.riscv.vsaddu.nxv4i32.i32( + undef, %0, i32 9, i64 %1) @@ -2505,6 +2609,7 @@ define @intrinsic_vsaddu_vi_nxv8i32_nxv8i32_i32( @llvm.riscv.vsaddu.nxv8i32.i32( + undef, %0, i32 9, i64 %1) @@ -2537,6 +2642,7 @@ define @intrinsic_vsaddu_vi_nxv16i32_nxv16i32_i32( @llvm.riscv.vsaddu.nxv16i32.i32( + undef, %0, i32 9, i64 %1) @@ -2569,6 +2675,7 @@ define @intrinsic_vsaddu_vi_nxv1i64_nxv1i64_i64( @llvm.riscv.vsaddu.nxv1i64.i64( + undef, %0, i64 9, i64 %1) @@ -2601,6 +2708,7 @@ define @intrinsic_vsaddu_vi_nxv2i64_nxv2i64_i64( @llvm.riscv.vsaddu.nxv2i64.i64( + undef, %0, i64 9, i64 %1) @@ -2633,6 +2741,7 @@ define @intrinsic_vsaddu_vi_nxv4i64_nxv4i64_i64( @llvm.riscv.vsaddu.nxv4i64.i64( + undef, %0, i64 9, i64 %1) @@ -2665,6 +2774,7 @@ define @intrinsic_vsaddu_vi_nxv8i64_nxv8i64_i64( @llvm.riscv.vsaddu.nxv8i64.i64( + undef, %0, i64 9, i64 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll index 5c0eb6b..5d0c898 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll @@ -7,12 +7,12 @@ declare i64 @llvm.riscv.vsetvli(i64, i64, i64) -declare @llvm.riscv.vfadd.nxv1f64.nxv1f64(, , i64) -declare @llvm.riscv.vfadd.nxv2f32.nxv2f32(, , i64) +declare @llvm.riscv.vfadd.nxv1f64.nxv1f64(, , , i64) +declare @llvm.riscv.vfadd.nxv2f32.nxv2f32(, , , i64) -declare @llvm.riscv.vfsub.nxv1f64.nxv1f64(, , i64) +declare @llvm.riscv.vfsub.nxv1f64.nxv1f64(, , , i64) -declare @llvm.riscv.vfmul.nxv1f64.nxv1f64(, , i64) +declare @llvm.riscv.vfmul.nxv1f64.nxv1f64(, , , i64) declare 
@llvm.riscv.vfmv.v.f.nxv1f64.f64(double, i64) declare @llvm.riscv.vfmv.v.f.nxv2f32.f32(float, i64) @@ -37,11 +37,11 @@ entry: br i1 %tobool, label %if.else, label %if.then if.then: ; preds = %entry - %1 = tail call @llvm.riscv.vfadd.nxv1f64.nxv1f64( %a, %b, i64 %0) + %1 = tail call @llvm.riscv.vfadd.nxv1f64.nxv1f64( undef, %a, %b, i64 %0) br label %if.end if.else: ; preds = %entry - %2 = tail call @llvm.riscv.vfsub.nxv1f64.nxv1f64( %a, %b, i64 %0) + %2 = tail call @llvm.riscv.vfsub.nxv1f64.nxv1f64( undef, %a, %b, i64 %0) br label %if.end if.end: ; preds = %if.else, %if.then @@ -70,16 +70,16 @@ entry: br i1 %tobool, label %if.else, label %if.then if.then: ; preds = %entry - %1 = tail call @llvm.riscv.vfadd.nxv1f64.nxv1f64( %a, %b, i64 %0) + %1 = tail call @llvm.riscv.vfadd.nxv1f64.nxv1f64( undef, %a, %b, i64 %0) br label %if.end if.else: ; preds = %entry - %2 = tail call @llvm.riscv.vfsub.nxv1f64.nxv1f64( %a, %b, i64 %0) + %2 = tail call @llvm.riscv.vfsub.nxv1f64.nxv1f64( undef, %a, %b, i64 %0) br label %if.end if.end: ; preds = %if.else, %if.then %c.0 = phi [ %1, %if.then ], [ %2, %if.else ] - %3 = tail call @llvm.riscv.vfmul.nxv1f64.nxv1f64( %c.0, %a, i64 %0) + %3 = tail call @llvm.riscv.vfmul.nxv1f64.nxv1f64( undef, %c.0, %a, i64 %0) ret %3 } @@ -103,18 +103,18 @@ entry: if.then: ; preds = %entry %0 = tail call i64 @llvm.riscv.vsetvli(i64 %avl, i64 3, i64 0) - %1 = tail call @llvm.riscv.vfadd.nxv1f64.nxv1f64( %a, %b, i64 %0) + %1 = tail call @llvm.riscv.vfadd.nxv1f64.nxv1f64( undef, %a, %b, i64 %0) br label %if.end if.else: ; preds = %entry %2 = tail call i64 @llvm.riscv.vsetvli(i64 %avl, i64 3, i64 0) - %3 = tail call @llvm.riscv.vfsub.nxv1f64.nxv1f64( %a, %b, i64 %2) + %3 = tail call @llvm.riscv.vfsub.nxv1f64.nxv1f64( undef, %a, %b, i64 %2) br label %if.end if.end: ; preds = %if.else, %if.then %vl.0 = phi i64 [ %0, %if.then], [ %2, %if.else ] %c.0 = phi [ %1, %if.then ], [ %3, %if.else ] - %4 = tail call @llvm.riscv.vfmul.nxv1f64.nxv1f64( %c.0, %a, i64 %vl.0) + %4 = tail call @llvm.riscv.vfmul.nxv1f64.nxv1f64( undef, %c.0, %a, i64 %vl.0) ret %4 } @@ -158,7 +158,7 @@ entry: if.then: ; preds = %entry %0 = tail call @llvm.riscv.vfmv.v.f.nxv1f64.f64(double 1.000000e+00, i64 %avl) %1 = tail call @llvm.riscv.vfmv.v.f.nxv1f64.f64(double 2.000000e+00, i64 %avl) - %2 = tail call @llvm.riscv.vfadd.nxv1f64.nxv1f64( %0, %1, i64 %avl) + %2 = tail call @llvm.riscv.vfadd.nxv1f64.nxv1f64( undef, %0, %1, i64 %avl) %3 = bitcast i8* @scratch to * tail call void @llvm.riscv.vse.nxv1f64( %2, * %3, i64 %avl) br label %if.end @@ -166,13 +166,13 @@ if.then: ; preds = %entry if.else: ; preds = %entry %4 = tail call @llvm.riscv.vfmv.v.f.nxv2f32.f32(float 1.000000e+00, i64 %avl) %5 = tail call @llvm.riscv.vfmv.v.f.nxv2f32.f32(float 2.000000e+00, i64 %avl) - %6 = tail call @llvm.riscv.vfadd.nxv2f32.nxv2f32( %4, %5, i64 %avl) + %6 = tail call @llvm.riscv.vfadd.nxv2f32.nxv2f32( undef, %4, %5, i64 %avl) %7 = bitcast i8* @scratch to * tail call void @llvm.riscv.vse.nxv2f32( %6, * %7, i64 %avl) br label %if.end if.end: ; preds = %if.else, %if.then - %8 = tail call @llvm.riscv.vfmul.nxv1f64.nxv1f64( %l, %r, i64 %avl) + %8 = tail call @llvm.riscv.vfmul.nxv1f64.nxv1f64( undef, %l, %r, i64 %avl) ret %8 } @@ -204,11 +204,11 @@ entry: br i1 %tobool, label %if.else, label %if.then if.then: ; preds = %entry - %1 = tail call @llvm.riscv.vfadd.nxv1f64.nxv1f64( %a, %b, i64 %0) + %1 = tail call @llvm.riscv.vfadd.nxv1f64.nxv1f64( undef, %a, %b, i64 %0) br label %if.end if.else: ; preds = %entry - %2 = tail call 
@llvm.riscv.vfsub.nxv1f64.nxv1f64( %a, %b, i64 %0) + %2 = tail call @llvm.riscv.vfsub.nxv1f64.nxv1f64( undef, %a, %b, i64 %0) br label %if.end if.end: ; preds = %if.else, %if.then @@ -218,11 +218,11 @@ if.end: ; preds = %if.else, %if.then br i1 %tobool3, label %if.else5, label %if.then4 if.then4: ; preds = %if.end - %3 = tail call @llvm.riscv.vfmul.nxv1f64.nxv1f64( %c.0, %a, i64 %0) + %3 = tail call @llvm.riscv.vfmul.nxv1f64.nxv1f64( undef, %c.0, %a, i64 %0) br label %if.end6 if.else5: ; preds = %if.end - %4 = tail call @llvm.riscv.vfmul.nxv1f64.nxv1f64( %a, %c.0, i64 %0) + %4 = tail call @llvm.riscv.vfmul.nxv1f64.nxv1f64( undef, %a, %c.0, i64 %0) br label %if.end6 if.end6: ; preds = %if.else5, %if.then4 @@ -284,11 +284,11 @@ entry: br i1 %tobool, label %if.else, label %if.then if.then: ; preds = %entry - %1 = tail call @llvm.riscv.vfadd.nxv1f64.nxv1f64( %a, %b, i64 %0) + %1 = tail call @llvm.riscv.vfadd.nxv1f64.nxv1f64( undef, %a, %b, i64 %0) br label %if.end if.else: ; preds = %entry - %2 = tail call @llvm.riscv.vfsub.nxv1f64.nxv1f64( %a, %b, i64 %0) + %2 = tail call @llvm.riscv.vfsub.nxv1f64.nxv1f64( undef, %a, %b, i64 %0) br label %if.end if.end: ; preds = %if.else, %if.then @@ -301,7 +301,7 @@ if.then4: ; preds = %if.end %3 = tail call i64 @llvm.riscv.vsetvli(i64 %avl, i64 3, i64 0) %4 = tail call @llvm.riscv.vfmv.v.f.nxv1f64.f64(double 1.000000e+00, i64 %3) %5 = tail call @llvm.riscv.vfmv.v.f.nxv1f64.f64(double 2.000000e+00, i64 %3) - %6 = tail call @llvm.riscv.vfadd.nxv1f64.nxv1f64( %4, %5, i64 %3) + %6 = tail call @llvm.riscv.vfadd.nxv1f64.nxv1f64( undef, %4, %5, i64 %3) %7 = bitcast i8* @scratch to * tail call void @llvm.riscv.vse.nxv1f64( %6, * %7, i64 %3) br label %if.end10 @@ -310,13 +310,13 @@ if.else5: ; preds = %if.end %8 = tail call i64 @llvm.riscv.vsetvli(i64 %avl, i64 2, i64 0) %9 = tail call @llvm.riscv.vfmv.v.f.nxv2f32.f32(float 1.000000e+00, i64 %8) %10 = tail call @llvm.riscv.vfmv.v.f.nxv2f32.f32(float 2.000000e+00, i64 %8) - %11 = tail call @llvm.riscv.vfadd.nxv2f32.nxv2f32( %9, %10, i64 %8) + %11 = tail call @llvm.riscv.vfadd.nxv2f32.nxv2f32( undef, %9, %10, i64 %8) %12 = bitcast i8* @scratch to * tail call void @llvm.riscv.vse.nxv2f32( %11, * %12, i64 %8) br label %if.end10 if.end10: ; preds = %if.else5, %if.then4 - %13 = tail call @llvm.riscv.vfmul.nxv1f64.nxv1f64( %c.0, %c.0, i64 %0) + %13 = tail call @llvm.riscv.vfmul.nxv1f64.nxv1f64( undef, %c.0, %c.0, i64 %0) ret %13 } @@ -368,12 +368,12 @@ entry: br i1 %tobool, label %if.else, label %if.then if.then: ; preds = %entry - %1 = tail call @llvm.riscv.vfadd.nxv1f64.nxv1f64( %a, %b, i64 %0) + %1 = tail call @llvm.riscv.vfadd.nxv1f64.nxv1f64( undef, %a, %b, i64 %0) br label %if.end if.else: ; preds = %entry call void @foo() - %2 = tail call @llvm.riscv.vfsub.nxv1f64.nxv1f64( %a, %b, i64 %0) + %2 = tail call @llvm.riscv.vfsub.nxv1f64.nxv1f64( undef, %a, %b, i64 %0) br label %if.end if.end: ; preds = %if.else, %if.then @@ -428,17 +428,17 @@ entry: br i1 %tobool, label %if.else, label %if.then if.then: ; preds = %entry - %1 = tail call @llvm.riscv.vfadd.nxv1f64.nxv1f64( %a, %b, i64 %0) + %1 = tail call @llvm.riscv.vfadd.nxv1f64.nxv1f64( undef, %a, %b, i64 %0) call void @foo() br label %if.end if.else: ; preds = %entry - %2 = tail call @llvm.riscv.vfsub.nxv1f64.nxv1f64( %a, %b, i64 %0) + %2 = tail call @llvm.riscv.vfsub.nxv1f64.nxv1f64( undef, %a, %b, i64 %0) br label %if.end if.end: ; preds = %if.else, %if.then %c.0 = phi [ %1, %if.then ], [ %2, %if.else ] - %3 = tail call @llvm.riscv.vfmul.nxv1f64.nxv1f64( %c.0, %a, 
i64 %0) + %3 = tail call @llvm.riscv.vfmul.nxv1f64.nxv1f64( undef, %c.0, %a, i64 %0) ret %3 } @@ -520,18 +520,18 @@ entry: if: %b = call @llvm.riscv.vle.nxv2i16( undef, * %y, i64 %vl) - %c = call @llvm.riscv.vwadd.nxv2i32( %b, i16 0, i64 %vl) + %c = call @llvm.riscv.vwadd.nxv2i32( undef, %b, i16 0, i64 %vl) br label %if.end if.end: %d = phi [ %z, %entry ], [ %c, %if ] - %e = call @llvm.riscv.vadd.nxv2i32( %a, %d, i64 %vl) + %e = call @llvm.riscv.vadd.nxv2i32( undef, %a, %d, i64 %vl) ret %e } declare @llvm.riscv.vle.nxv2i32(, *, i64) declare @llvm.riscv.vle.nxv2i16(, *, i64) -declare @llvm.riscv.vwadd.nxv2i32(, i16, i64) -declare @llvm.riscv.vadd.nxv2i32(, , i64) +declare @llvm.riscv.vwadd.nxv2i32(, , i16, i64) +declare @llvm.riscv.vadd.nxv2i32(, , , i64) ; We can use X0, X0 vsetvli in if2 and if2.end. The merge point as if.end will ; see two different vtypes with the same SEW/LMUL ratio. At if2.end we will only @@ -566,7 +566,7 @@ entry: if: %b = call @llvm.riscv.vle.nxv2i16( undef, * %y, i64 %vl) - %c = call @llvm.riscv.vwadd.w.nxv2i32.nxv2i16( %a, %b, i64 %vl) + %c = call @llvm.riscv.vwadd.w.nxv2i32.nxv2i16( undef, %a, %b, i64 %vl) br label %if.end if.end: @@ -575,12 +575,12 @@ if.end: if2: %e = call @llvm.riscv.vle.nxv2i16( undef, * %z, i64 %vl) - %f = call @llvm.riscv.vwadd.w.nxv2i32.nxv2i16( %d, %e, i64 %vl) + %f = call @llvm.riscv.vwadd.w.nxv2i32.nxv2i16( undef, %d, %e, i64 %vl) br label %if2.end if2.end: %g = phi [ %d, %if.end ], [ %f, %if2 ] - %h = call @llvm.riscv.vadd.nxv2i32( %g, %w, i64 %vl) + %h = call @llvm.riscv.vadd.nxv2i32( undef, %g, %w, i64 %vl) ret %h } -declare @llvm.riscv.vwadd.w.nxv2i32.nxv2i16(, , i64) +declare @llvm.riscv.vwadd.w.nxv2i32.nxv2i16(, , , i64) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir index 1cb4169..489e43c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir +++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir @@ -15,11 +15,11 @@ br i1 %tobool, label %if.else, label %if.then if.then: ; preds = %entry - %b = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( %a, %1, i64 %2) + %b = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( undef, %a, %1, i64 %2) br label %if.end if.else: ; preds = %entry - %c = call @llvm.riscv.vsub.nxv1i64.nxv1i64.i64( %a, %1, i64 %2) + %c = call @llvm.riscv.vsub.nxv1i64.nxv1i64.i64( undef, %a, %1, i64 %2) br label %if.end if.end: ; preds = %if.else, %if.then @@ -56,11 +56,11 @@ br i1 %tobool, label %if.else, label %if.then if.then: ; preds = %entry - %a = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( %0, %1, i64 %2) + %a = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( undef, %0, %1, i64 %2) br label %if.end if.else: ; preds = %entry - %b = call @llvm.riscv.vsub.nxv1i64.nxv1i64.i64( %1, %1, i64 %2) + %b = call @llvm.riscv.vsub.nxv1i64.nxv1i64.i64( undef, %1, %1, i64 %2) br label %if.end if.end: ; preds = %if.else, %if.then @@ -79,11 +79,11 @@ br i1 %tobool, label %if.else, label %if.then if.then: ; preds = %entry - %b = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( %0, %1, i64 %vl) + %b = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( undef, %0, %1, i64 %vl) br label %if.end if.else: ; preds = %entry - %c = call @llvm.riscv.vsub.nxv1i64.nxv1i64.i64( %0, %1, i64 %vl) + %c = call @llvm.riscv.vsub.nxv1i64.nxv1i64.i64( undef, %0, %1, i64 %vl) br label %if.end if.end: ; preds = %if.else, %if.then @@ -125,10 +125,10 @@ declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>) ; Function Attrs: nounwind readnone - declare @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(, , 
i64) #1 + declare @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(, , , i64) #1 ; Function Attrs: nounwind readnone - declare @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(, , i64) #1 + declare @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(, , , i64) #1 ; Function Attrs: nounwind readonly declare @llvm.riscv.vle.nxv1i64.i64(, * nocapture, i64) #3 diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll index e708f5b..f08cf4f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll @@ -7,6 +7,7 @@ declare i64 @llvm.riscv.vsetvlimax(i64, i64) declare @llvm.riscv.vfadd.nxv1f64.nxv1f64( , , + , i64) declare @llvm.riscv.vle.mask.nxv1i64( , @@ -23,6 +24,7 @@ define @test1(i64 %avl, %a, @llvm.riscv.vfadd.nxv1f64.nxv1f64( + undef, %a, %b, i64 %0) @@ -38,6 +40,7 @@ define @test2(i64 %avl, %a, @llvm.riscv.vfadd.nxv1f64.nxv1f64( + undef, %a, %b, i64 %avl) @@ -247,6 +250,7 @@ define @test13( %a, @llvm.riscv.vfadd.nxv1f64.nxv1f64( + undef, %a, %b, i64 -1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir index 4aa61e0..d7a49aa 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir +++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir @@ -10,14 +10,14 @@ define @add( %0, %1, i64 %2) #0 { entry: - %a = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( %0, %1, i64 %2) + %a = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( undef, %0, %1, i64 %2) ret %a } define @load_add(* %0, %1, i64 %2) #0 { entry: %a = call @llvm.riscv.vle.nxv1i64.i64( undef, * %0, i64 %2) - %b = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( %a, %1, i64 %2) + %b = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( undef, %a, %1, i64 %2) ret %b } @@ -60,7 +60,7 @@ define @vsetvli_add( %0, %1, i64 %avl) #0 { entry: %a = call i64 @llvm.riscv.vsetvli.i64(i64 %avl, i64 3, i64 0) - %b = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( %0, %1, i64 %a) + %b = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( undef, %0, %1, i64 %a) ret %b } @@ -68,12 +68,12 @@ entry: %a = call @llvm.riscv.vle.nxv1i64.i64( undef, * %0, i64 %2) call void asm sideeffect "", ""() - %b = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( %a, %1, i64 %2) + %b = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( undef, %a, %1, i64 %2) ret %b } ; Function Attrs: nounwind readnone - declare @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(, , i64) #1 + declare @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(, , , i64) #1 ; Function Attrs: nounwind readonly declare @llvm.riscv.vle.nxv1i64.i64(, * nocapture, i64) #4 diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll index a9a9ea8..d01e7a6 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll @@ -3,6 +3,7 @@ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vslide1down.nxv1i8.i8( , + , i8, i32); @@ -14,6 +15,7 @@ define @intrinsic_vslide1down_vx_nxv1i8_nxv1i8_i8( @llvm.riscv.vslide1down.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -48,6 +50,7 @@ entry: declare @llvm.riscv.vslide1down.nxv2i8.i8( , + , i8, i32); @@ -59,6 +62,7 @@ define @intrinsic_vslide1down_vx_nxv2i8_nxv2i8_i8( @llvm.riscv.vslide1down.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -93,6 +97,7 @@ entry: declare @llvm.riscv.vslide1down.nxv4i8.i8( , + , i8, i32); @@ -104,6 +109,7 @@ define @intrinsic_vslide1down_vx_nxv4i8_nxv4i8_i8( @llvm.riscv.vslide1down.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -138,6 +144,7 @@ entry: declare @llvm.riscv.vslide1down.nxv8i8.i8( , + , i8, i32); @@ -149,6 
+156,7 @@ define @intrinsic_vslide1down_vx_nxv8i8_nxv8i8_i8( @llvm.riscv.vslide1down.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -183,6 +191,7 @@ entry: declare @llvm.riscv.vslide1down.nxv16i8.i8( , + , i8, i32); @@ -194,6 +203,7 @@ define @intrinsic_vslide1down_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vslide1down.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -228,6 +238,7 @@ entry: declare @llvm.riscv.vslide1down.nxv32i8.i8( , + , i8, i32); @@ -239,6 +250,7 @@ define @intrinsic_vslide1down_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vslide1down.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -273,6 +285,7 @@ entry: declare @llvm.riscv.vslide1down.nxv64i8.i8( , + , i8, i32); @@ -284,6 +297,7 @@ define @intrinsic_vslide1down_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vslide1down.nxv64i8.i8( + undef, %0, i8 %1, i32 %2) @@ -318,6 +332,7 @@ entry: declare @llvm.riscv.vslide1down.nxv1i16.i16( , + , i16, i32); @@ -329,6 +344,7 @@ define @intrinsic_vslide1down_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vslide1down.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -363,6 +379,7 @@ entry: declare @llvm.riscv.vslide1down.nxv2i16.i16( , + , i16, i32); @@ -374,6 +391,7 @@ define @intrinsic_vslide1down_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vslide1down.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -408,6 +426,7 @@ entry: declare @llvm.riscv.vslide1down.nxv4i16.i16( , + , i16, i32); @@ -419,6 +438,7 @@ define @intrinsic_vslide1down_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vslide1down.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -453,6 +473,7 @@ entry: declare @llvm.riscv.vslide1down.nxv8i16.i16( , + , i16, i32); @@ -464,6 +485,7 @@ define @intrinsic_vslide1down_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vslide1down.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -498,6 +520,7 @@ entry: declare @llvm.riscv.vslide1down.nxv16i16.i16( , + , i16, i32); @@ -509,6 +532,7 @@ define @intrinsic_vslide1down_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vslide1down.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -543,6 +567,7 @@ entry: declare @llvm.riscv.vslide1down.nxv32i16.i16( , + , i16, i32); @@ -554,6 +579,7 @@ define @intrinsic_vslide1down_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vslide1down.nxv32i16.i16( + undef, %0, i16 %1, i32 %2) @@ -588,6 +614,7 @@ entry: declare @llvm.riscv.vslide1down.nxv1i32.i32( , + , i32, i32); @@ -599,6 +626,7 @@ define @intrinsic_vslide1down_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vslide1down.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -633,6 +661,7 @@ entry: declare @llvm.riscv.vslide1down.nxv2i32.i32( , + , i32, i32); @@ -644,6 +673,7 @@ define @intrinsic_vslide1down_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vslide1down.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -678,6 +708,7 @@ entry: declare @llvm.riscv.vslide1down.nxv4i32.i32( , + , i32, i32); @@ -689,6 +720,7 @@ define @intrinsic_vslide1down_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vslide1down.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -723,6 +755,7 @@ entry: declare @llvm.riscv.vslide1down.nxv8i32.i32( , + , i32, i32); @@ -734,6 +767,7 @@ define @intrinsic_vslide1down_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vslide1down.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) @@ -768,6 +802,7 @@ entry: declare @llvm.riscv.vslide1down.nxv16i32.i32( , + , i32, i32); @@ -779,6 +814,7 @@ define @intrinsic_vslide1down_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vslide1down.nxv16i32.i32( + undef, %0, i32 %1, i32 %2) @@ -813,6 +849,7 @@ entry: declare @llvm.riscv.vslide1down.nxv1i64.i64( , + , i64, i32); @@ -826,6 +863,7 @@ define @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vslide1down.nxv1i64.i64( + undef, %0, i64 %1, i32 
%2) @@ -864,6 +902,7 @@ entry: declare @llvm.riscv.vslide1down.nxv2i64.i64( , + , i64, i32); @@ -877,6 +916,7 @@ define @intrinsic_vslide1down_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vslide1down.nxv2i64.i64( + undef, %0, i64 %1, i32 %2) @@ -915,6 +955,7 @@ entry: declare @llvm.riscv.vslide1down.nxv4i64.i64( , + , i64, i32); @@ -928,6 +969,7 @@ define @intrinsic_vslide1down_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vslide1down.nxv4i64.i64( + undef, %0, i64 %1, i32 %2) @@ -966,6 +1008,7 @@ entry: declare @llvm.riscv.vslide1down.nxv8i64.i64( , + , i64, i32); @@ -979,6 +1022,7 @@ define @intrinsic_vslide1down_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vslide1down.nxv8i64.i64( + undef, %0, i64 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv64.ll index 2110f21..696c12a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv64.ll @@ -3,6 +3,7 @@ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vslide1down.nxv1i8.i8( , + , i8, i64); @@ -14,6 +15,7 @@ define @intrinsic_vslide1down_vx_nxv1i8_nxv1i8_i8( @llvm.riscv.vslide1down.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -48,6 +50,7 @@ entry: declare @llvm.riscv.vslide1down.nxv2i8.i8( , + , i8, i64); @@ -59,6 +62,7 @@ define @intrinsic_vslide1down_vx_nxv2i8_nxv2i8_i8( @llvm.riscv.vslide1down.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -93,6 +97,7 @@ entry: declare @llvm.riscv.vslide1down.nxv4i8.i8( , + , i8, i64); @@ -104,6 +109,7 @@ define @intrinsic_vslide1down_vx_nxv4i8_nxv4i8_i8( @llvm.riscv.vslide1down.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -138,6 +144,7 @@ entry: declare @llvm.riscv.vslide1down.nxv8i8.i8( , + , i8, i64); @@ -149,6 +156,7 @@ define @intrinsic_vslide1down_vx_nxv8i8_nxv8i8_i8( @llvm.riscv.vslide1down.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -183,6 +191,7 @@ entry: declare @llvm.riscv.vslide1down.nxv16i8.i8( , + , i8, i64); @@ -194,6 +203,7 @@ define @intrinsic_vslide1down_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vslide1down.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -228,6 +238,7 @@ entry: declare @llvm.riscv.vslide1down.nxv32i8.i8( , + , i8, i64); @@ -239,6 +250,7 @@ define @intrinsic_vslide1down_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vslide1down.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -273,6 +285,7 @@ entry: declare @llvm.riscv.vslide1down.nxv64i8.i8( , + , i8, i64); @@ -284,6 +297,7 @@ define @intrinsic_vslide1down_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vslide1down.nxv64i8.i8( + undef, %0, i8 %1, i64 %2) @@ -318,6 +332,7 @@ entry: declare @llvm.riscv.vslide1down.nxv1i16.i16( , + , i16, i64); @@ -329,6 +344,7 @@ define @intrinsic_vslide1down_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vslide1down.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -363,6 +379,7 @@ entry: declare @llvm.riscv.vslide1down.nxv2i16.i16( , + , i16, i64); @@ -374,6 +391,7 @@ define @intrinsic_vslide1down_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vslide1down.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -408,6 +426,7 @@ entry: declare @llvm.riscv.vslide1down.nxv4i16.i16( , + , i16, i64); @@ -419,6 +438,7 @@ define @intrinsic_vslide1down_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vslide1down.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -453,6 +473,7 @@ entry: declare @llvm.riscv.vslide1down.nxv8i16.i16( , + , i16, i64); @@ -464,6 +485,7 @@ define @intrinsic_vslide1down_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vslide1down.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -498,6 +520,7 @@ entry: declare @llvm.riscv.vslide1down.nxv16i16.i16( , + , i16, i64); @@ -509,6 +532,7 @@ define 
@intrinsic_vslide1down_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vslide1down.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -543,6 +567,7 @@ entry: declare @llvm.riscv.vslide1down.nxv32i16.i16( , + , i16, i64); @@ -554,6 +579,7 @@ define @intrinsic_vslide1down_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vslide1down.nxv32i16.i16( + undef, %0, i16 %1, i64 %2) @@ -588,6 +614,7 @@ entry: declare @llvm.riscv.vslide1down.nxv1i32.i32( , + , i32, i64); @@ -599,6 +626,7 @@ define @intrinsic_vslide1down_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vslide1down.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -633,6 +661,7 @@ entry: declare @llvm.riscv.vslide1down.nxv2i32.i32( , + , i32, i64); @@ -644,6 +673,7 @@ define @intrinsic_vslide1down_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vslide1down.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -678,6 +708,7 @@ entry: declare @llvm.riscv.vslide1down.nxv4i32.i32( , + , i32, i64); @@ -689,6 +720,7 @@ define @intrinsic_vslide1down_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vslide1down.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -723,6 +755,7 @@ entry: declare @llvm.riscv.vslide1down.nxv8i32.i32( , + , i32, i64); @@ -734,6 +767,7 @@ define @intrinsic_vslide1down_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vslide1down.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) @@ -768,6 +802,7 @@ entry: declare @llvm.riscv.vslide1down.nxv16i32.i32( , + , i32, i64); @@ -779,6 +814,7 @@ define @intrinsic_vslide1down_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vslide1down.nxv16i32.i32( + undef, %0, i32 %1, i64 %2) @@ -813,6 +849,7 @@ entry: declare @llvm.riscv.vslide1down.nxv1i64.i64( , + , i64, i64); @@ -824,6 +861,7 @@ define @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vslide1down.nxv1i64.i64( + undef, %0, i64 %1, i64 %2) @@ -858,6 +896,7 @@ entry: declare @llvm.riscv.vslide1down.nxv2i64.i64( , + , i64, i64); @@ -869,6 +908,7 @@ define @intrinsic_vslide1down_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vslide1down.nxv2i64.i64( + undef, %0, i64 %1, i64 %2) @@ -903,6 +943,7 @@ entry: declare @llvm.riscv.vslide1down.nxv4i64.i64( , + , i64, i64); @@ -914,6 +955,7 @@ define @intrinsic_vslide1down_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vslide1down.nxv4i64.i64( + undef, %0, i64 %1, i64 %2) @@ -948,6 +990,7 @@ entry: declare @llvm.riscv.vslide1down.nxv8i64.i64( , + , i64, i64); @@ -959,6 +1002,7 @@ define @intrinsic_vslide1down_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vslide1down.nxv8i64.i64( + undef, %0, i64 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll index c5b0bb3..1209956 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll @@ -3,6 +3,7 @@ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vslide1up.nxv1i8.i8( , + , i8, i32); @@ -15,6 +16,7 @@ define @intrinsic_vslide1up_vx_nxv1i8_nxv1i8_i8( @llvm.riscv.vslide1up.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -49,6 +51,7 @@ entry: declare @llvm.riscv.vslide1up.nxv2i8.i8( , + , i8, i32); @@ -61,6 +64,7 @@ define @intrinsic_vslide1up_vx_nxv2i8_nxv2i8_i8( @llvm.riscv.vslide1up.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -95,6 +99,7 @@ entry: declare @llvm.riscv.vslide1up.nxv4i8.i8( , + , i8, i32); @@ -107,6 +112,7 @@ define @intrinsic_vslide1up_vx_nxv4i8_nxv4i8_i8( @llvm.riscv.vslide1up.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -141,6 +147,7 @@ entry: declare @llvm.riscv.vslide1up.nxv8i8.i8( , + , i8, i32); @@ -153,6 +160,7 @@ define @intrinsic_vslide1up_vx_nxv8i8_nxv8i8_i8( @llvm.riscv.vslide1up.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -187,6 +195,7 @@ entry: declare 
@llvm.riscv.vslide1up.nxv16i8.i8( , + , i8, i32); @@ -199,6 +208,7 @@ define @intrinsic_vslide1up_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vslide1up.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -233,6 +243,7 @@ entry: declare @llvm.riscv.vslide1up.nxv32i8.i8( , + , i8, i32); @@ -245,6 +256,7 @@ define @intrinsic_vslide1up_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vslide1up.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -279,6 +291,7 @@ entry: declare @llvm.riscv.vslide1up.nxv64i8.i8( , + , i8, i32); @@ -291,6 +304,7 @@ define @intrinsic_vslide1up_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vslide1up.nxv64i8.i8( + undef, %0, i8 %1, i32 %2) @@ -325,6 +339,7 @@ entry: declare @llvm.riscv.vslide1up.nxv1i16.i16( , + , i16, i32); @@ -337,6 +352,7 @@ define @intrinsic_vslide1up_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vslide1up.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -371,6 +387,7 @@ entry: declare @llvm.riscv.vslide1up.nxv2i16.i16( , + , i16, i32); @@ -383,6 +400,7 @@ define @intrinsic_vslide1up_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vslide1up.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -417,6 +435,7 @@ entry: declare @llvm.riscv.vslide1up.nxv4i16.i16( , + , i16, i32); @@ -429,6 +448,7 @@ define @intrinsic_vslide1up_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vslide1up.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -463,6 +483,7 @@ entry: declare @llvm.riscv.vslide1up.nxv8i16.i16( , + , i16, i32); @@ -475,6 +496,7 @@ define @intrinsic_vslide1up_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vslide1up.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -509,6 +531,7 @@ entry: declare @llvm.riscv.vslide1up.nxv16i16.i16( , + , i16, i32); @@ -521,6 +544,7 @@ define @intrinsic_vslide1up_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vslide1up.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -555,6 +579,7 @@ entry: declare @llvm.riscv.vslide1up.nxv32i16.i16( , + , i16, i32); @@ -567,6 +592,7 @@ define @intrinsic_vslide1up_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vslide1up.nxv32i16.i16( + undef, %0, i16 %1, i32 %2) @@ -601,6 +627,7 @@ entry: declare @llvm.riscv.vslide1up.nxv1i32.i32( , + , i32, i32); @@ -613,6 +640,7 @@ define @intrinsic_vslide1up_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vslide1up.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -647,6 +675,7 @@ entry: declare @llvm.riscv.vslide1up.nxv2i32.i32( , + , i32, i32); @@ -659,6 +688,7 @@ define @intrinsic_vslide1up_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vslide1up.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -693,6 +723,7 @@ entry: declare @llvm.riscv.vslide1up.nxv4i32.i32( , + , i32, i32); @@ -705,6 +736,7 @@ define @intrinsic_vslide1up_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vslide1up.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -739,6 +771,7 @@ entry: declare @llvm.riscv.vslide1up.nxv8i32.i32( , + , i32, i32); @@ -751,6 +784,7 @@ define @intrinsic_vslide1up_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vslide1up.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) @@ -785,6 +819,7 @@ entry: declare @llvm.riscv.vslide1up.nxv16i32.i32( , + , i32, i32); @@ -797,6 +832,7 @@ define @intrinsic_vslide1up_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vslide1up.nxv16i32.i32( + undef, %0, i32 %1, i32 %2) @@ -831,6 +867,7 @@ entry: declare @llvm.riscv.vslide1up.nxv1i64.i64( , + , i64, i32); @@ -844,6 +881,7 @@ define @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vslide1up.nxv1i64.i64( + undef, %0, i64 %1, i32 %2) @@ -882,6 +920,7 @@ entry: declare @llvm.riscv.vslide1up.nxv2i64.i64( , + , i64, i32); @@ -895,6 +934,7 @@ define @intrinsic_vslide1up_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vslide1up.nxv2i64.i64( + undef, %0, i64 %1, i32 %2) @@ -933,6 +973,7 @@ 
entry: declare @llvm.riscv.vslide1up.nxv4i64.i64( , + , i64, i32); @@ -946,6 +987,7 @@ define @intrinsic_vslide1up_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vslide1up.nxv4i64.i64( + undef, %0, i64 %1, i32 %2) @@ -984,6 +1026,7 @@ entry: declare @llvm.riscv.vslide1up.nxv8i64.i64( , + , i64, i32); @@ -997,6 +1040,7 @@ define @intrinsic_vslide1up_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vslide1up.nxv8i64.i64( + undef, %0, i64 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll index 81c16b9..a0f9476 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll @@ -3,6 +3,7 @@ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vslide1up.nxv1i8.i8( , + , i8, i64); @@ -15,6 +16,7 @@ define @intrinsic_vslide1up_vx_nxv1i8_nxv1i8_i8( @llvm.riscv.vslide1up.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -49,6 +51,7 @@ entry: declare @llvm.riscv.vslide1up.nxv2i8.i8( , + , i8, i64); @@ -61,6 +64,7 @@ define @intrinsic_vslide1up_vx_nxv2i8_nxv2i8_i8( @llvm.riscv.vslide1up.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -95,6 +99,7 @@ entry: declare @llvm.riscv.vslide1up.nxv4i8.i8( , + , i8, i64); @@ -107,6 +112,7 @@ define @intrinsic_vslide1up_vx_nxv4i8_nxv4i8_i8( @llvm.riscv.vslide1up.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -141,6 +147,7 @@ entry: declare @llvm.riscv.vslide1up.nxv8i8.i8( , + , i8, i64); @@ -153,6 +160,7 @@ define @intrinsic_vslide1up_vx_nxv8i8_nxv8i8_i8( @llvm.riscv.vslide1up.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -187,6 +195,7 @@ entry: declare @llvm.riscv.vslide1up.nxv16i8.i8( , + , i8, i64); @@ -199,6 +208,7 @@ define @intrinsic_vslide1up_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vslide1up.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -233,6 +243,7 @@ entry: declare @llvm.riscv.vslide1up.nxv32i8.i8( , + , i8, i64); @@ -245,6 +256,7 @@ define @intrinsic_vslide1up_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vslide1up.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -279,6 +291,7 @@ entry: declare @llvm.riscv.vslide1up.nxv64i8.i8( , + , i8, i64); @@ -291,6 +304,7 @@ define @intrinsic_vslide1up_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vslide1up.nxv64i8.i8( + undef, %0, i8 %1, i64 %2) @@ -325,6 +339,7 @@ entry: declare @llvm.riscv.vslide1up.nxv1i16.i16( , + , i16, i64); @@ -337,6 +352,7 @@ define @intrinsic_vslide1up_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vslide1up.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -371,6 +387,7 @@ entry: declare @llvm.riscv.vslide1up.nxv2i16.i16( , + , i16, i64); @@ -383,6 +400,7 @@ define @intrinsic_vslide1up_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vslide1up.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -417,6 +435,7 @@ entry: declare @llvm.riscv.vslide1up.nxv4i16.i16( , + , i16, i64); @@ -429,6 +448,7 @@ define @intrinsic_vslide1up_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vslide1up.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -463,6 +483,7 @@ entry: declare @llvm.riscv.vslide1up.nxv8i16.i16( , + , i16, i64); @@ -475,6 +496,7 @@ define @intrinsic_vslide1up_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vslide1up.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -509,6 +531,7 @@ entry: declare @llvm.riscv.vslide1up.nxv16i16.i16( , + , i16, i64); @@ -521,6 +544,7 @@ define @intrinsic_vslide1up_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vslide1up.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -555,6 +579,7 @@ entry: declare @llvm.riscv.vslide1up.nxv32i16.i16( , + , i16, i64); @@ -567,6 +592,7 @@ define @intrinsic_vslide1up_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vslide1up.nxv32i16.i16( + undef, %0, i16 %1, i64 %2) @@ -601,6 +627,7 @@ 
entry: declare @llvm.riscv.vslide1up.nxv1i32.i32( , + , i32, i64); @@ -613,6 +640,7 @@ define @intrinsic_vslide1up_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vslide1up.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -647,6 +675,7 @@ entry: declare @llvm.riscv.vslide1up.nxv2i32.i32( , + , i32, i64); @@ -659,6 +688,7 @@ define @intrinsic_vslide1up_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vslide1up.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -693,6 +723,7 @@ entry: declare @llvm.riscv.vslide1up.nxv4i32.i32( , + , i32, i64); @@ -705,6 +736,7 @@ define @intrinsic_vslide1up_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vslide1up.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -739,6 +771,7 @@ entry: declare @llvm.riscv.vslide1up.nxv8i32.i32( , + , i32, i64); @@ -751,6 +784,7 @@ define @intrinsic_vslide1up_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vslide1up.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) @@ -785,6 +819,7 @@ entry: declare @llvm.riscv.vslide1up.nxv16i32.i32( , + , i32, i64); @@ -797,6 +832,7 @@ define @intrinsic_vslide1up_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vslide1up.nxv16i32.i32( + undef, %0, i32 %1, i64 %2) @@ -831,6 +867,7 @@ entry: declare @llvm.riscv.vslide1up.nxv1i64.i64( , + , i64, i64); @@ -843,6 +880,7 @@ define @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vslide1up.nxv1i64.i64( + undef, %0, i64 %1, i64 %2) @@ -877,6 +915,7 @@ entry: declare @llvm.riscv.vslide1up.nxv2i64.i64( , + , i64, i64); @@ -889,6 +928,7 @@ define @intrinsic_vslide1up_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vslide1up.nxv2i64.i64( + undef, %0, i64 %1, i64 %2) @@ -923,6 +963,7 @@ entry: declare @llvm.riscv.vslide1up.nxv4i64.i64( , + , i64, i64); @@ -935,6 +976,7 @@ define @intrinsic_vslide1up_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vslide1up.nxv4i64.i64( + undef, %0, i64 %1, i64 %2) @@ -969,6 +1011,7 @@ entry: declare @llvm.riscv.vslide1up.nxv8i64.i64( , + , i64, i64); @@ -981,6 +1024,7 @@ define @intrinsic_vslide1up_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vslide1up.nxv8i64.i64( + undef, %0, i64 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll index cd14152..019b40e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll @@ -4,6 +4,7 @@ declare @llvm.riscv.vsll.nxv1i8.nxv1i8( , , + , i32); define @intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { @@ -14,6 +15,7 @@ define @intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vsll.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -49,6 +51,7 @@ entry: declare @llvm.riscv.vsll.nxv2i8.nxv2i8( , , + , i32); define @intrinsic_vsll_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { @@ -59,6 +62,7 @@ define @intrinsic_vsll_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vsll.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -94,6 +98,7 @@ entry: declare @llvm.riscv.vsll.nxv4i8.nxv4i8( , , + , i32); define @intrinsic_vsll_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { @@ -104,6 +109,7 @@ define @intrinsic_vsll_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vsll.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -139,6 +145,7 @@ entry: declare @llvm.riscv.vsll.nxv8i8.nxv8i8( , , + , i32); define @intrinsic_vsll_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { @@ -149,6 +156,7 @@ define @intrinsic_vsll_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vsll.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -184,6 +192,7 @@ entry: declare @llvm.riscv.vsll.nxv16i8.nxv16i8( , , + , i32); define @intrinsic_vsll_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { @@ -194,6 +203,7 @@ define @intrinsic_vsll_vv_nxv16i8_nxv16i8_nxv16i8( 
@llvm.riscv.vsll.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -229,6 +239,7 @@ entry: declare @llvm.riscv.vsll.nxv32i8.nxv32i8( , , + , i32); define @intrinsic_vsll_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { @@ -239,6 +250,7 @@ define @intrinsic_vsll_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vsll.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -274,6 +286,7 @@ entry: declare @llvm.riscv.vsll.nxv64i8.nxv64i8( , , + , i32); define @intrinsic_vsll_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { @@ -284,6 +297,7 @@ define @intrinsic_vsll_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vsll.nxv64i8.nxv64i8( + undef, %0, %1, i32 %2) @@ -320,6 +334,7 @@ entry: declare @llvm.riscv.vsll.nxv1i16.nxv1i16( , , + , i32); define @intrinsic_vsll_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { @@ -330,6 +345,7 @@ define @intrinsic_vsll_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vsll.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -365,6 +381,7 @@ entry: declare @llvm.riscv.vsll.nxv2i16.nxv2i16( , , + , i32); define @intrinsic_vsll_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { @@ -375,6 +392,7 @@ define @intrinsic_vsll_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vsll.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -410,6 +428,7 @@ entry: declare @llvm.riscv.vsll.nxv4i16.nxv4i16( , , + , i32); define @intrinsic_vsll_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { @@ -420,6 +439,7 @@ define @intrinsic_vsll_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vsll.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -455,6 +475,7 @@ entry: declare @llvm.riscv.vsll.nxv8i16.nxv8i16( , , + , i32); define @intrinsic_vsll_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { @@ -465,6 +486,7 @@ define @intrinsic_vsll_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vsll.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vsll.nxv16i16.nxv16i16( , , + , i32); define @intrinsic_vsll_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { @@ -510,6 +533,7 @@ define @intrinsic_vsll_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vsll.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vsll.nxv32i16.nxv32i16( , , + , i32); define @intrinsic_vsll_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { @@ -555,6 +580,7 @@ define @intrinsic_vsll_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vsll.nxv32i16.nxv32i16( + undef, %0, %1, i32 %2) @@ -591,6 +617,7 @@ entry: declare @llvm.riscv.vsll.nxv1i32.nxv1i32( , , + , i32); define @intrinsic_vsll_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { @@ -601,6 +628,7 @@ define @intrinsic_vsll_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vsll.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -636,6 +664,7 @@ entry: declare @llvm.riscv.vsll.nxv2i32.nxv2i32( , , + , i32); define @intrinsic_vsll_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { @@ -646,6 +675,7 @@ define @intrinsic_vsll_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vsll.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vsll.nxv4i32.nxv4i32( , , + , i32); define @intrinsic_vsll_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { @@ -691,6 +722,7 @@ define @intrinsic_vsll_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vsll.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vsll.nxv8i32.nxv8i32( , , + , i32); define @intrinsic_vsll_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { @@ -736,6 +769,7 @@ define @intrinsic_vsll_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vsll.nxv8i32.nxv8i32( + undef, 
%0, %1, i32 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vsll.nxv16i32.nxv16i32( , , + , i32); define @intrinsic_vsll_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { @@ -781,6 +816,7 @@ define @intrinsic_vsll_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vsll.nxv16i32.nxv16i32( + undef, %0, %1, i32 %2) @@ -817,6 +853,7 @@ entry: declare @llvm.riscv.vsll.nxv1i64.nxv1i64( , , + , i32); define @intrinsic_vsll_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { @@ -827,6 +864,7 @@ define @intrinsic_vsll_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vsll.nxv1i64.nxv1i64( + undef, %0, %1, i32 %2) @@ -862,6 +900,7 @@ entry: declare @llvm.riscv.vsll.nxv2i64.nxv2i64( , , + , i32); define @intrinsic_vsll_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { @@ -872,6 +911,7 @@ define @intrinsic_vsll_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vsll.nxv2i64.nxv2i64( + undef, %0, %1, i32 %2) @@ -907,6 +947,7 @@ entry: declare @llvm.riscv.vsll.nxv4i64.nxv4i64( , , + , i32); define @intrinsic_vsll_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { @@ -917,6 +958,7 @@ define @intrinsic_vsll_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vsll.nxv4i64.nxv4i64( + undef, %0, %1, i32 %2) @@ -952,6 +994,7 @@ entry: declare @llvm.riscv.vsll.nxv8i64.nxv8i64( , , + , i32); define @intrinsic_vsll_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { @@ -962,6 +1005,7 @@ define @intrinsic_vsll_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vsll.nxv8i64.nxv8i64( + undef, %0, %1, i32 %2) @@ -997,6 +1041,7 @@ entry: declare @llvm.riscv.vsll.nxv1i8( , + , i32, i32); @@ -1008,6 +1053,7 @@ define @intrinsic_vsll_vx_nxv1i8_nxv1i8( %0, ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv1i8( + undef, %0, i32 %1, i32 %2) @@ -1042,6 +1088,7 @@ entry: declare @llvm.riscv.vsll.nxv2i8( , + , i32, i32); @@ -1053,6 +1100,7 @@ define @intrinsic_vsll_vx_nxv2i8_nxv2i8( %0, ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv2i8( + undef, %0, i32 %1, i32 %2) @@ -1087,6 +1135,7 @@ entry: declare @llvm.riscv.vsll.nxv4i8( , + , i32, i32); @@ -1098,6 +1147,7 @@ define @intrinsic_vsll_vx_nxv4i8_nxv4i8( %0, ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv4i8( + undef, %0, i32 %1, i32 %2) @@ -1132,6 +1182,7 @@ entry: declare @llvm.riscv.vsll.nxv8i8( , + , i32, i32); @@ -1143,6 +1194,7 @@ define @intrinsic_vsll_vx_nxv8i8_nxv8i8( %0, ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv8i8( + undef, %0, i32 %1, i32 %2) @@ -1177,6 +1229,7 @@ entry: declare @llvm.riscv.vsll.nxv16i8( , + , i32, i32); @@ -1188,6 +1241,7 @@ define @intrinsic_vsll_vx_nxv16i8_nxv16i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv16i8( + undef, %0, i32 %1, i32 %2) @@ -1222,6 +1276,7 @@ entry: declare @llvm.riscv.vsll.nxv32i8( , + , i32, i32); @@ -1233,6 +1288,7 @@ define @intrinsic_vsll_vx_nxv32i8_nxv32i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv32i8( + undef, %0, i32 %1, i32 %2) @@ -1267,6 +1323,7 @@ entry: declare @llvm.riscv.vsll.nxv64i8( , + , i32, i32); @@ -1278,6 +1335,7 @@ define @intrinsic_vsll_vx_nxv64i8_nxv64i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv64i8( + undef, %0, i32 %1, i32 %2) @@ -1312,6 +1370,7 @@ entry: declare @llvm.riscv.vsll.nxv1i16( , + , i32, i32); @@ -1323,6 +1382,7 @@ define @intrinsic_vsll_vx_nxv1i16_nxv1i16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv1i16( + undef, %0, i32 %1, i32 %2) @@ -1357,6 +1417,7 @@ entry: declare @llvm.riscv.vsll.nxv2i16( , + , i32, i32); @@ -1368,6 +1429,7 @@ define @intrinsic_vsll_vx_nxv2i16_nxv2i16( ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vsll.nxv2i16( + undef, %0, i32 %1, i32 %2) @@ -1402,6 +1464,7 @@ entry: declare @llvm.riscv.vsll.nxv4i16( , + , i32, i32); @@ -1413,6 +1476,7 @@ define @intrinsic_vsll_vx_nxv4i16_nxv4i16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv4i16( + undef, %0, i32 %1, i32 %2) @@ -1447,6 +1511,7 @@ entry: declare @llvm.riscv.vsll.nxv8i16( , + , i32, i32); @@ -1458,6 +1523,7 @@ define @intrinsic_vsll_vx_nxv8i16_nxv8i16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv8i16( + undef, %0, i32 %1, i32 %2) @@ -1492,6 +1558,7 @@ entry: declare @llvm.riscv.vsll.nxv16i16( , + , i32, i32); @@ -1503,6 +1570,7 @@ define @intrinsic_vsll_vx_nxv16i16_nxv16i16( @llvm.riscv.vsll.nxv16i16( + undef, %0, i32 %1, i32 %2) @@ -1537,6 +1605,7 @@ entry: declare @llvm.riscv.vsll.nxv32i16( , + , i32, i32); @@ -1548,6 +1617,7 @@ define @intrinsic_vsll_vx_nxv32i16_nxv32i16( @llvm.riscv.vsll.nxv32i16( + undef, %0, i32 %1, i32 %2) @@ -1582,6 +1652,7 @@ entry: declare @llvm.riscv.vsll.nxv1i32( , + , i32, i32); @@ -1593,6 +1664,7 @@ define @intrinsic_vsll_vx_nxv1i32_nxv1i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv1i32( + undef, %0, i32 %1, i32 %2) @@ -1627,6 +1699,7 @@ entry: declare @llvm.riscv.vsll.nxv2i32( , + , i32, i32); @@ -1638,6 +1711,7 @@ define @intrinsic_vsll_vx_nxv2i32_nxv2i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv2i32( + undef, %0, i32 %1, i32 %2) @@ -1672,6 +1746,7 @@ entry: declare @llvm.riscv.vsll.nxv4i32( , + , i32, i32); @@ -1683,6 +1758,7 @@ define @intrinsic_vsll_vx_nxv4i32_nxv4i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv4i32( + undef, %0, i32 %1, i32 %2) @@ -1717,6 +1793,7 @@ entry: declare @llvm.riscv.vsll.nxv8i32( , + , i32, i32); @@ -1728,6 +1805,7 @@ define @intrinsic_vsll_vx_nxv8i32_nxv8i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv8i32( + undef, %0, i32 %1, i32 %2) @@ -1762,6 +1840,7 @@ entry: declare @llvm.riscv.vsll.nxv16i32( , + , i32, i32); @@ -1773,6 +1852,7 @@ define @intrinsic_vsll_vx_nxv16i32_nxv16i32( @llvm.riscv.vsll.nxv16i32( + undef, %0, i32 %1, i32 %2) @@ -1807,6 +1887,7 @@ entry: declare @llvm.riscv.vsll.nxv1i64( , + , i32, i32); @@ -1818,6 +1899,7 @@ define @intrinsic_vsll_vx_nxv1i64_nxv1i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv1i64( + undef, %0, i32 %1, i32 %2) @@ -1852,6 +1934,7 @@ entry: declare @llvm.riscv.vsll.nxv2i64( , + , i32, i32); @@ -1863,6 +1946,7 @@ define @intrinsic_vsll_vx_nxv2i64_nxv2i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv2i64( + undef, %0, i32 %1, i32 %2) @@ -1897,6 +1981,7 @@ entry: declare @llvm.riscv.vsll.nxv4i64( , + , i32, i32); @@ -1908,6 +1993,7 @@ define @intrinsic_vsll_vx_nxv4i64_nxv4i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv4i64( + undef, %0, i32 %1, i32 %2) @@ -1942,6 +2028,7 @@ entry: declare @llvm.riscv.vsll.nxv8i64( , + , i32, i32); @@ -1953,6 +2040,7 @@ define @intrinsic_vsll_vx_nxv8i64_nxv8i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv8i64( + undef, %0, i32 %1, i32 %2) @@ -1993,6 +2081,7 @@ define @intrinsic_vsll_vi_nxv1i8_nxv1i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv1i8( + undef, %0, i32 9, i32 %1) @@ -2008,6 +2097,7 @@ define @intrinsic_vsll_1_nxv1i8_nxv1i8_i8( %0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv1i8( + undef, %0, i32 1, i32 %1) @@ -2057,6 +2147,7 @@ define @intrinsic_vsll_vi_nxv2i8_nxv2i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv2i8( + undef, %0, i32 9, i32 %1) @@ -2089,6 +2180,7 @@ define @intrinsic_vsll_vi_nxv4i8_nxv4i8_i8( % ; 
CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv4i8( + undef, %0, i32 9, i32 %1) @@ -2121,6 +2213,7 @@ define @intrinsic_vsll_vi_nxv8i8_nxv8i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv8i8( + undef, %0, i32 9, i32 %1) @@ -2153,6 +2246,7 @@ define @intrinsic_vsll_vi_nxv16i8_nxv16i8_i8( @llvm.riscv.vsll.nxv16i8( + undef, %0, i32 9, i32 %1) @@ -2185,6 +2279,7 @@ define @intrinsic_vsll_vi_nxv32i8_nxv32i8_i8( @llvm.riscv.vsll.nxv32i8( + undef, %0, i32 9, i32 %1) @@ -2217,6 +2312,7 @@ define @intrinsic_vsll_vi_nxv64i8_nxv64i8_i8( @llvm.riscv.vsll.nxv64i8( + undef, %0, i32 9, i32 %1) @@ -2249,6 +2345,7 @@ define @intrinsic_vsll_vi_nxv1i16_nxv1i16_i16( @llvm.riscv.vsll.nxv1i16( + undef, %0, i32 9, i32 %1) @@ -2281,6 +2378,7 @@ define @intrinsic_vsll_vi_nxv2i16_nxv2i16_i16( @llvm.riscv.vsll.nxv2i16( + undef, %0, i32 9, i32 %1) @@ -2313,6 +2411,7 @@ define @intrinsic_vsll_vi_nxv4i16_nxv4i16_i16( @llvm.riscv.vsll.nxv4i16( + undef, %0, i32 9, i32 %1) @@ -2345,6 +2444,7 @@ define @intrinsic_vsll_vi_nxv8i16_nxv8i16_i16( @llvm.riscv.vsll.nxv8i16( + undef, %0, i32 9, i32 %1) @@ -2377,6 +2477,7 @@ define @intrinsic_vsll_vi_nxv16i16_nxv16i16_i16( @llvm.riscv.vsll.nxv16i16( + undef, %0, i32 9, i32 %1) @@ -2409,6 +2510,7 @@ define @intrinsic_vsll_vi_nxv32i16_nxv32i16_i16( @llvm.riscv.vsll.nxv32i16( + undef, %0, i32 9, i32 %1) @@ -2441,6 +2543,7 @@ define @intrinsic_vsll_vi_nxv1i32_nxv1i32_i32( @llvm.riscv.vsll.nxv1i32( + undef, %0, i32 9, i32 %1) @@ -2473,6 +2576,7 @@ define @intrinsic_vsll_vi_nxv2i32_nxv2i32_i32( @llvm.riscv.vsll.nxv2i32( + undef, %0, i32 9, i32 %1) @@ -2505,6 +2609,7 @@ define @intrinsic_vsll_vi_nxv4i32_nxv4i32_i32( @llvm.riscv.vsll.nxv4i32( + undef, %0, i32 9, i32 %1) @@ -2537,6 +2642,7 @@ define @intrinsic_vsll_vi_nxv8i32_nxv8i32_i32( @llvm.riscv.vsll.nxv8i32( + undef, %0, i32 9, i32 %1) @@ -2569,6 +2675,7 @@ define @intrinsic_vsll_vi_nxv16i32_nxv16i32_i32( @llvm.riscv.vsll.nxv16i32( + undef, %0, i32 9, i32 %1) @@ -2601,6 +2708,7 @@ define @intrinsic_vsll_vi_nxv1i64_nxv1i64_i64( @llvm.riscv.vsll.nxv1i64( + undef, %0, i32 9, i32 %1) @@ -2633,6 +2741,7 @@ define @intrinsic_vsll_vi_nxv2i64_nxv2i64_i64( @llvm.riscv.vsll.nxv2i64( + undef, %0, i32 9, i32 %1) @@ -2665,6 +2774,7 @@ define @intrinsic_vsll_vi_nxv4i64_nxv4i64_i64( @llvm.riscv.vsll.nxv4i64( + undef, %0, i32 9, i32 %1) @@ -2697,6 +2807,7 @@ define @intrinsic_vsll_vi_nxv8i64_nxv8i64_i64( @llvm.riscv.vsll.nxv8i64( + undef, %0, i32 9, i32 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll index 305f871..da88d4f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll @@ -4,6 +4,7 @@ declare @llvm.riscv.vsll.nxv1i8.nxv1i8( , , + , i64); define @intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { @@ -14,6 +15,7 @@ define @intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vsll.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -49,6 +51,7 @@ entry: declare @llvm.riscv.vsll.nxv2i8.nxv2i8( , , + , i64); define @intrinsic_vsll_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { @@ -59,6 +62,7 @@ define @intrinsic_vsll_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vsll.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -94,6 +98,7 @@ entry: declare @llvm.riscv.vsll.nxv4i8.nxv4i8( , , + , i64); define @intrinsic_vsll_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { @@ -104,6 +109,7 @@ define @intrinsic_vsll_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vsll.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -139,6 +145,7 @@ entry: declare 
@llvm.riscv.vsll.nxv8i8.nxv8i8( , , + , i64); define @intrinsic_vsll_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { @@ -149,6 +156,7 @@ define @intrinsic_vsll_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vsll.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -184,6 +192,7 @@ entry: declare @llvm.riscv.vsll.nxv16i8.nxv16i8( , , + , i64); define @intrinsic_vsll_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { @@ -194,6 +203,7 @@ define @intrinsic_vsll_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vsll.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -229,6 +239,7 @@ entry: declare @llvm.riscv.vsll.nxv32i8.nxv32i8( , , + , i64); define @intrinsic_vsll_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { @@ -239,6 +250,7 @@ define @intrinsic_vsll_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vsll.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -274,6 +286,7 @@ entry: declare @llvm.riscv.vsll.nxv64i8.nxv64i8( , , + , i64); define @intrinsic_vsll_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { @@ -284,6 +297,7 @@ define @intrinsic_vsll_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vsll.nxv64i8.nxv64i8( + undef, %0, %1, i64 %2) @@ -320,6 +334,7 @@ entry: declare @llvm.riscv.vsll.nxv1i16.nxv1i16( , , + , i64); define @intrinsic_vsll_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { @@ -330,6 +345,7 @@ define @intrinsic_vsll_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vsll.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -365,6 +381,7 @@ entry: declare @llvm.riscv.vsll.nxv2i16.nxv2i16( , , + , i64); define @intrinsic_vsll_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { @@ -375,6 +392,7 @@ define @intrinsic_vsll_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vsll.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -410,6 +428,7 @@ entry: declare @llvm.riscv.vsll.nxv4i16.nxv4i16( , , + , i64); define @intrinsic_vsll_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { @@ -420,6 +439,7 @@ define @intrinsic_vsll_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vsll.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -455,6 +475,7 @@ entry: declare @llvm.riscv.vsll.nxv8i16.nxv8i16( , , + , i64); define @intrinsic_vsll_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { @@ -465,6 +486,7 @@ define @intrinsic_vsll_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vsll.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vsll.nxv16i16.nxv16i16( , , + , i64); define @intrinsic_vsll_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { @@ -510,6 +533,7 @@ define @intrinsic_vsll_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vsll.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vsll.nxv32i16.nxv32i16( , , + , i64); define @intrinsic_vsll_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { @@ -555,6 +580,7 @@ define @intrinsic_vsll_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vsll.nxv32i16.nxv32i16( + undef, %0, %1, i64 %2) @@ -591,6 +617,7 @@ entry: declare @llvm.riscv.vsll.nxv1i32.nxv1i32( , , + , i64); define @intrinsic_vsll_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { @@ -601,6 +628,7 @@ define @intrinsic_vsll_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vsll.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -636,6 +664,7 @@ entry: declare @llvm.riscv.vsll.nxv2i32.nxv2i32( , , + , i64); define @intrinsic_vsll_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { @@ -646,6 +675,7 @@ define @intrinsic_vsll_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vsll.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vsll.nxv4i32.nxv4i32( , , + , i64); 
define @intrinsic_vsll_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { @@ -691,6 +722,7 @@ define @intrinsic_vsll_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vsll.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vsll.nxv8i32.nxv8i32( , , + , i64); define @intrinsic_vsll_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { @@ -736,6 +769,7 @@ define @intrinsic_vsll_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vsll.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vsll.nxv16i32.nxv16i32( , , + , i64); define @intrinsic_vsll_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { @@ -781,6 +816,7 @@ define @intrinsic_vsll_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vsll.nxv16i32.nxv16i32( + undef, %0, %1, i64 %2) @@ -817,6 +853,7 @@ entry: declare @llvm.riscv.vsll.nxv1i64.nxv1i64( , , + , i64); define @intrinsic_vsll_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { @@ -827,6 +864,7 @@ define @intrinsic_vsll_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vsll.nxv1i64.nxv1i64( + undef, %0, %1, i64 %2) @@ -862,6 +900,7 @@ entry: declare @llvm.riscv.vsll.nxv2i64.nxv2i64( , , + , i64); define @intrinsic_vsll_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { @@ -872,6 +911,7 @@ define @intrinsic_vsll_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vsll.nxv2i64.nxv2i64( + undef, %0, %1, i64 %2) @@ -907,6 +947,7 @@ entry: declare @llvm.riscv.vsll.nxv4i64.nxv4i64( , , + , i64); define @intrinsic_vsll_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { @@ -917,6 +958,7 @@ define @intrinsic_vsll_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vsll.nxv4i64.nxv4i64( + undef, %0, %1, i64 %2) @@ -952,6 +994,7 @@ entry: declare @llvm.riscv.vsll.nxv8i64.nxv8i64( , , + , i64); define @intrinsic_vsll_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { @@ -962,6 +1005,7 @@ define @intrinsic_vsll_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vsll.nxv8i64.nxv8i64( + undef, %0, %1, i64 %2) @@ -997,6 +1041,7 @@ entry: declare @llvm.riscv.vsll.nxv1i8( , + , i64, i64); @@ -1008,6 +1053,7 @@ define @intrinsic_vsll_vx_nxv1i8_nxv1i8( %0, ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv1i8( + undef, %0, i64 %1, i64 %2) @@ -1042,6 +1088,7 @@ entry: declare @llvm.riscv.vsll.nxv2i8( , + , i64, i64); @@ -1053,6 +1100,7 @@ define @intrinsic_vsll_vx_nxv2i8_nxv2i8( %0, ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv2i8( + undef, %0, i64 %1, i64 %2) @@ -1087,6 +1135,7 @@ entry: declare @llvm.riscv.vsll.nxv4i8( , + , i64, i64); @@ -1098,6 +1147,7 @@ define @intrinsic_vsll_vx_nxv4i8_nxv4i8( %0, ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv4i8( + undef, %0, i64 %1, i64 %2) @@ -1132,6 +1182,7 @@ entry: declare @llvm.riscv.vsll.nxv8i8( , + , i64, i64); @@ -1143,6 +1194,7 @@ define @intrinsic_vsll_vx_nxv8i8_nxv8i8( %0, ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv8i8( + undef, %0, i64 %1, i64 %2) @@ -1177,6 +1229,7 @@ entry: declare @llvm.riscv.vsll.nxv16i8( , + , i64, i64); @@ -1188,6 +1241,7 @@ define @intrinsic_vsll_vx_nxv16i8_nxv16i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv16i8( + undef, %0, i64 %1, i64 %2) @@ -1222,6 +1276,7 @@ entry: declare @llvm.riscv.vsll.nxv32i8( , + , i64, i64); @@ -1233,6 +1288,7 @@ define @intrinsic_vsll_vx_nxv32i8_nxv32i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv32i8( + undef, %0, i64 %1, i64 %2) @@ -1267,6 +1323,7 @@ entry: declare @llvm.riscv.vsll.nxv64i8( , + , i64, i64); @@ -1278,6 +1335,7 @@ define @intrinsic_vsll_vx_nxv64i8_nxv64i8( ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vsll.nxv64i8( + undef, %0, i64 %1, i64 %2) @@ -1312,6 +1370,7 @@ entry: declare @llvm.riscv.vsll.nxv1i16( , + , i64, i64); @@ -1323,6 +1382,7 @@ define @intrinsic_vsll_vx_nxv1i16_nxv1i16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv1i16( + undef, %0, i64 %1, i64 %2) @@ -1357,6 +1417,7 @@ entry: declare @llvm.riscv.vsll.nxv2i16( , + , i64, i64); @@ -1368,6 +1429,7 @@ define @intrinsic_vsll_vx_nxv2i16_nxv2i16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv2i16( + undef, %0, i64 %1, i64 %2) @@ -1402,6 +1464,7 @@ entry: declare @llvm.riscv.vsll.nxv4i16( , + , i64, i64); @@ -1413,6 +1476,7 @@ define @intrinsic_vsll_vx_nxv4i16_nxv4i16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv4i16( + undef, %0, i64 %1, i64 %2) @@ -1447,6 +1511,7 @@ entry: declare @llvm.riscv.vsll.nxv8i16( , + , i64, i64); @@ -1458,6 +1523,7 @@ define @intrinsic_vsll_vx_nxv8i16_nxv8i16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv8i16( + undef, %0, i64 %1, i64 %2) @@ -1492,6 +1558,7 @@ entry: declare @llvm.riscv.vsll.nxv16i16( , + , i64, i64); @@ -1503,6 +1570,7 @@ define @intrinsic_vsll_vx_nxv16i16_nxv16i16( @llvm.riscv.vsll.nxv16i16( + undef, %0, i64 %1, i64 %2) @@ -1537,6 +1605,7 @@ entry: declare @llvm.riscv.vsll.nxv32i16( , + , i64, i64); @@ -1548,6 +1617,7 @@ define @intrinsic_vsll_vx_nxv32i16_nxv32i16( @llvm.riscv.vsll.nxv32i16( + undef, %0, i64 %1, i64 %2) @@ -1582,6 +1652,7 @@ entry: declare @llvm.riscv.vsll.nxv1i32( , + , i64, i64); @@ -1593,6 +1664,7 @@ define @intrinsic_vsll_vx_nxv1i32_nxv1i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv1i32( + undef, %0, i64 %1, i64 %2) @@ -1627,6 +1699,7 @@ entry: declare @llvm.riscv.vsll.nxv2i32( , + , i64, i64); @@ -1638,6 +1711,7 @@ define @intrinsic_vsll_vx_nxv2i32_nxv2i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv2i32( + undef, %0, i64 %1, i64 %2) @@ -1672,6 +1746,7 @@ entry: declare @llvm.riscv.vsll.nxv4i32( , + , i64, i64); @@ -1683,6 +1758,7 @@ define @intrinsic_vsll_vx_nxv4i32_nxv4i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv4i32( + undef, %0, i64 %1, i64 %2) @@ -1717,6 +1793,7 @@ entry: declare @llvm.riscv.vsll.nxv8i32( , + , i64, i64); @@ -1728,6 +1805,7 @@ define @intrinsic_vsll_vx_nxv8i32_nxv8i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv8i32( + undef, %0, i64 %1, i64 %2) @@ -1762,6 +1840,7 @@ entry: declare @llvm.riscv.vsll.nxv16i32( , + , i64, i64); @@ -1773,6 +1852,7 @@ define @intrinsic_vsll_vx_nxv16i32_nxv16i32( @llvm.riscv.vsll.nxv16i32( + undef, %0, i64 %1, i64 %2) @@ -1807,6 +1887,7 @@ entry: declare @llvm.riscv.vsll.nxv1i64( , + , i64, i64); @@ -1818,6 +1899,7 @@ define @intrinsic_vsll_vx_nxv1i64_nxv1i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv1i64( + undef, %0, i64 %1, i64 %2) @@ -1852,6 +1934,7 @@ entry: declare @llvm.riscv.vsll.nxv2i64( , + , i64, i64); @@ -1863,6 +1946,7 @@ define @intrinsic_vsll_vx_nxv2i64_nxv2i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv2i64( + undef, %0, i64 %1, i64 %2) @@ -1897,6 +1981,7 @@ entry: declare @llvm.riscv.vsll.nxv4i64( , + , i64, i64); @@ -1908,6 +1993,7 @@ define @intrinsic_vsll_vx_nxv4i64_nxv4i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv4i64( + undef, %0, i64 %1, i64 %2) @@ -1942,6 +2028,7 @@ entry: declare @llvm.riscv.vsll.nxv8i64( , + , i64, i64); @@ -1953,6 +2040,7 @@ define @intrinsic_vsll_vx_nxv8i64_nxv8i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv8i64( + undef, %0, i64 %1, i64 %2) @@ -1993,6 +2081,7 @@ define @intrinsic_vsll_vi_nxv1i8_nxv1i8_i8( % 
; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv1i8( + undef, %0, i64 9, i64 %1) @@ -2008,6 +2097,7 @@ define @intrinsic_vsll_1_nxv1i8_nxv1i8_i8( %0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv1i8( + undef, %0, i64 1, i64 %1) @@ -2057,6 +2147,7 @@ define @intrinsic_vsll_vi_nxv2i8_nxv2i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv2i8( + undef, %0, i64 9, i64 %1) @@ -2089,6 +2180,7 @@ define @intrinsic_vsll_vi_nxv4i8_nxv4i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv4i8( + undef, %0, i64 9, i64 %1) @@ -2121,6 +2213,7 @@ define @intrinsic_vsll_vi_nxv8i8_nxv8i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv8i8( + undef, %0, i64 9, i64 %1) @@ -2153,6 +2246,7 @@ define @intrinsic_vsll_vi_nxv16i8_nxv16i8_i8( @llvm.riscv.vsll.nxv16i8( + undef, %0, i64 9, i64 %1) @@ -2185,6 +2279,7 @@ define @intrinsic_vsll_vi_nxv32i8_nxv32i8_i8( @llvm.riscv.vsll.nxv32i8( + undef, %0, i64 9, i64 %1) @@ -2217,6 +2312,7 @@ define @intrinsic_vsll_vi_nxv64i8_nxv64i8_i8( @llvm.riscv.vsll.nxv64i8( + undef, %0, i64 9, i64 %1) @@ -2249,6 +2345,7 @@ define @intrinsic_vsll_vi_nxv1i16_nxv1i16_i16( @llvm.riscv.vsll.nxv1i16( + undef, %0, i64 9, i64 %1) @@ -2281,6 +2378,7 @@ define @intrinsic_vsll_vi_nxv2i16_nxv2i16_i16( @llvm.riscv.vsll.nxv2i16( + undef, %0, i64 9, i64 %1) @@ -2313,6 +2411,7 @@ define @intrinsic_vsll_vi_nxv4i16_nxv4i16_i16( @llvm.riscv.vsll.nxv4i16( + undef, %0, i64 9, i64 %1) @@ -2345,6 +2444,7 @@ define @intrinsic_vsll_vi_nxv8i16_nxv8i16_i16( @llvm.riscv.vsll.nxv8i16( + undef, %0, i64 9, i64 %1) @@ -2377,6 +2477,7 @@ define @intrinsic_vsll_vi_nxv16i16_nxv16i16_i16( @llvm.riscv.vsll.nxv16i16( + undef, %0, i64 9, i64 %1) @@ -2409,6 +2510,7 @@ define @intrinsic_vsll_vi_nxv32i16_nxv32i16_i16( @llvm.riscv.vsll.nxv32i16( + undef, %0, i64 9, i64 %1) @@ -2441,6 +2543,7 @@ define @intrinsic_vsll_vi_nxv1i32_nxv1i32_i32( @llvm.riscv.vsll.nxv1i32( + undef, %0, i64 9, i64 %1) @@ -2473,6 +2576,7 @@ define @intrinsic_vsll_vi_nxv2i32_nxv2i32_i32( @llvm.riscv.vsll.nxv2i32( + undef, %0, i64 9, i64 %1) @@ -2505,6 +2609,7 @@ define @intrinsic_vsll_vi_nxv4i32_nxv4i32_i32( @llvm.riscv.vsll.nxv4i32( + undef, %0, i64 9, i64 %1) @@ -2537,6 +2642,7 @@ define @intrinsic_vsll_vi_nxv8i32_nxv8i32_i32( @llvm.riscv.vsll.nxv8i32( + undef, %0, i64 9, i64 %1) @@ -2569,6 +2675,7 @@ define @intrinsic_vsll_vi_nxv16i32_nxv16i32_i32( @llvm.riscv.vsll.nxv16i32( + undef, %0, i64 9, i64 %1) @@ -2601,6 +2708,7 @@ define @intrinsic_vsll_vi_nxv1i64_nxv1i64_i64( @llvm.riscv.vsll.nxv1i64( + undef, %0, i64 9, i64 %1) @@ -2633,6 +2741,7 @@ define @intrinsic_vsll_vi_nxv2i64_nxv2i64_i64( @llvm.riscv.vsll.nxv2i64( + undef, %0, i64 9, i64 %1) @@ -2665,6 +2774,7 @@ define @intrinsic_vsll_vi_nxv4i64_nxv4i64_i64( @llvm.riscv.vsll.nxv4i64( + undef, %0, i64 9, i64 %1) @@ -2697,6 +2807,7 @@ define @intrinsic_vsll_vi_nxv8i64_nxv8i64_i64( @llvm.riscv.vsll.nxv8i64( + undef, %0, i64 9, i64 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll index be3a442..50a08f0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll @@ -4,6 +4,7 @@ declare @llvm.riscv.vsmul.nxv1i8.nxv1i8( , , + , i32); define @intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { @@ -14,6 +15,7 @@ define @intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vsmul.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -49,6 +51,7 @@ entry: declare @llvm.riscv.vsmul.nxv2i8.nxv2i8( , , + , i32); define 
@intrinsic_vsmul_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { @@ -59,6 +62,7 @@ define @intrinsic_vsmul_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vsmul.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -94,6 +98,7 @@ entry: declare @llvm.riscv.vsmul.nxv4i8.nxv4i8( , , + , i32); define @intrinsic_vsmul_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { @@ -104,6 +109,7 @@ define @intrinsic_vsmul_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vsmul.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -139,6 +145,7 @@ entry: declare @llvm.riscv.vsmul.nxv8i8.nxv8i8( , , + , i32); define @intrinsic_vsmul_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { @@ -149,6 +156,7 @@ define @intrinsic_vsmul_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vsmul.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -184,6 +192,7 @@ entry: declare @llvm.riscv.vsmul.nxv16i8.nxv16i8( , , + , i32); define @intrinsic_vsmul_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { @@ -194,6 +203,7 @@ define @intrinsic_vsmul_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vsmul.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -229,6 +239,7 @@ entry: declare @llvm.riscv.vsmul.nxv32i8.nxv32i8( , , + , i32); define @intrinsic_vsmul_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { @@ -239,6 +250,7 @@ define @intrinsic_vsmul_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vsmul.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -274,6 +286,7 @@ entry: declare @llvm.riscv.vsmul.nxv64i8.nxv64i8( , , + , i32); define @intrinsic_vsmul_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { @@ -284,6 +297,7 @@ define @intrinsic_vsmul_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vsmul.nxv64i8.nxv64i8( + undef, %0, %1, i32 %2) @@ -320,6 +334,7 @@ entry: declare @llvm.riscv.vsmul.nxv1i16.nxv1i16( , , + , i32); define @intrinsic_vsmul_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { @@ -330,6 +345,7 @@ define @intrinsic_vsmul_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vsmul.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -365,6 +381,7 @@ entry: declare @llvm.riscv.vsmul.nxv2i16.nxv2i16( , , + , i32); define @intrinsic_vsmul_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { @@ -375,6 +392,7 @@ define @intrinsic_vsmul_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vsmul.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -410,6 +428,7 @@ entry: declare @llvm.riscv.vsmul.nxv4i16.nxv4i16( , , + , i32); define @intrinsic_vsmul_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { @@ -420,6 +439,7 @@ define @intrinsic_vsmul_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vsmul.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -455,6 +475,7 @@ entry: declare @llvm.riscv.vsmul.nxv8i16.nxv8i16( , , + , i32); define @intrinsic_vsmul_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { @@ -465,6 +486,7 @@ define @intrinsic_vsmul_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vsmul.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vsmul.nxv16i16.nxv16i16( , , + , i32); define @intrinsic_vsmul_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { @@ -510,6 +533,7 @@ define @intrinsic_vsmul_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vsmul.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vsmul.nxv32i16.nxv32i16( , , + , i32); define @intrinsic_vsmul_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { @@ -555,6 +580,7 @@ define @intrinsic_vsmul_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vsmul.nxv32i16.nxv32i16( + undef, %0, %1, i32 %2) @@ -591,6 +617,7 @@ entry: declare @llvm.riscv.vsmul.nxv1i32.nxv1i32( , , + , i32); define 
@intrinsic_vsmul_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { @@ -601,6 +628,7 @@ define @intrinsic_vsmul_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vsmul.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -636,6 +664,7 @@ entry: declare @llvm.riscv.vsmul.nxv2i32.nxv2i32( , , + , i32); define @intrinsic_vsmul_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { @@ -646,6 +675,7 @@ define @intrinsic_vsmul_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vsmul.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vsmul.nxv4i32.nxv4i32( , , + , i32); define @intrinsic_vsmul_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { @@ -691,6 +722,7 @@ define @intrinsic_vsmul_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vsmul.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vsmul.nxv8i32.nxv8i32( , , + , i32); define @intrinsic_vsmul_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { @@ -736,6 +769,7 @@ define @intrinsic_vsmul_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vsmul.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vsmul.nxv16i32.nxv16i32( , , + , i32); define @intrinsic_vsmul_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { @@ -781,6 +816,7 @@ define @intrinsic_vsmul_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vsmul.nxv16i32.nxv16i32( + undef, %0, %1, i32 %2) @@ -817,6 +853,7 @@ entry: declare @llvm.riscv.vsmul.nxv1i64.nxv1i64( , , + , i32); define @intrinsic_vsmul_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { @@ -827,6 +864,7 @@ define @intrinsic_vsmul_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vsmul.nxv1i64.nxv1i64( + undef, %0, %1, i32 %2) @@ -862,6 +900,7 @@ entry: declare @llvm.riscv.vsmul.nxv2i64.nxv2i64( , , + , i32); define @intrinsic_vsmul_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { @@ -872,6 +911,7 @@ define @intrinsic_vsmul_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vsmul.nxv2i64.nxv2i64( + undef, %0, %1, i32 %2) @@ -907,6 +947,7 @@ entry: declare @llvm.riscv.vsmul.nxv4i64.nxv4i64( , , + , i32); define @intrinsic_vsmul_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { @@ -917,6 +958,7 @@ define @intrinsic_vsmul_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vsmul.nxv4i64.nxv4i64( + undef, %0, %1, i32 %2) @@ -952,6 +994,7 @@ entry: declare @llvm.riscv.vsmul.nxv8i64.nxv8i64( , , + , i32); define @intrinsic_vsmul_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { @@ -962,6 +1005,7 @@ define @intrinsic_vsmul_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vsmul.nxv8i64.nxv8i64( + undef, %0, %1, i32 %2) @@ -997,6 +1041,7 @@ entry: declare @llvm.riscv.vsmul.nxv1i8.i8( , + , i8, i32); @@ -1008,6 +1053,7 @@ define @intrinsic_vsmul_vx_nxv1i8_nxv1i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsmul.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1042,6 +1088,7 @@ entry: declare @llvm.riscv.vsmul.nxv2i8.i8( , + , i8, i32); @@ -1053,6 +1100,7 @@ define @intrinsic_vsmul_vx_nxv2i8_nxv2i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsmul.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1087,6 +1135,7 @@ entry: declare @llvm.riscv.vsmul.nxv4i8.i8( , + , i8, i32); @@ -1098,6 +1147,7 @@ define @intrinsic_vsmul_vx_nxv4i8_nxv4i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsmul.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1132,6 +1182,7 @@ entry: declare @llvm.riscv.vsmul.nxv8i8.i8( , + , i8, i32); @@ -1143,6 +1194,7 @@ define @intrinsic_vsmul_vx_nxv8i8_nxv8i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsmul.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1177,6 
+1229,7 @@ entry: declare @llvm.riscv.vsmul.nxv16i8.i8( , + , i8, i32); @@ -1188,6 +1241,7 @@ define @intrinsic_vsmul_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vsmul.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1222,6 +1276,7 @@ entry: declare @llvm.riscv.vsmul.nxv32i8.i8( , + , i8, i32); @@ -1233,6 +1288,7 @@ define @intrinsic_vsmul_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vsmul.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1267,6 +1323,7 @@ entry: declare @llvm.riscv.vsmul.nxv64i8.i8( , + , i8, i32); @@ -1278,6 +1335,7 @@ define @intrinsic_vsmul_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vsmul.nxv64i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1312,6 +1370,7 @@ entry: declare @llvm.riscv.vsmul.nxv1i16.i16( , + , i16, i32); @@ -1323,6 +1382,7 @@ define @intrinsic_vsmul_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vsmul.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1357,6 +1417,7 @@ entry: declare @llvm.riscv.vsmul.nxv2i16.i16( , + , i16, i32); @@ -1368,6 +1429,7 @@ define @intrinsic_vsmul_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vsmul.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1402,6 +1464,7 @@ entry: declare @llvm.riscv.vsmul.nxv4i16.i16( , + , i16, i32); @@ -1413,6 +1476,7 @@ define @intrinsic_vsmul_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vsmul.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1447,6 +1511,7 @@ entry: declare @llvm.riscv.vsmul.nxv8i16.i16( , + , i16, i32); @@ -1458,6 +1523,7 @@ define @intrinsic_vsmul_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vsmul.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1492,6 +1558,7 @@ entry: declare @llvm.riscv.vsmul.nxv16i16.i16( , + , i16, i32); @@ -1503,6 +1570,7 @@ define @intrinsic_vsmul_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vsmul.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1537,6 +1605,7 @@ entry: declare @llvm.riscv.vsmul.nxv32i16.i16( , + , i16, i32); @@ -1548,6 +1617,7 @@ define @intrinsic_vsmul_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vsmul.nxv32i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1582,6 +1652,7 @@ entry: declare @llvm.riscv.vsmul.nxv1i32.i32( , + , i32, i32); @@ -1593,6 +1664,7 @@ define @intrinsic_vsmul_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vsmul.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1627,6 +1699,7 @@ entry: declare @llvm.riscv.vsmul.nxv2i32.i32( , + , i32, i32); @@ -1638,6 +1711,7 @@ define @intrinsic_vsmul_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vsmul.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1672,6 +1746,7 @@ entry: declare @llvm.riscv.vsmul.nxv4i32.i32( , + , i32, i32); @@ -1683,6 +1758,7 @@ define @intrinsic_vsmul_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vsmul.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1717,6 +1793,7 @@ entry: declare @llvm.riscv.vsmul.nxv8i32.i32( , + , i32, i32); @@ -1728,6 +1805,7 @@ define @intrinsic_vsmul_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vsmul.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1762,6 +1840,7 @@ entry: declare @llvm.riscv.vsmul.nxv16i32.i32( , + , i32, i32); @@ -1773,6 +1852,7 @@ define @intrinsic_vsmul_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vsmul.nxv16i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1807,6 +1887,7 @@ entry: declare @llvm.riscv.vsmul.nxv1i64.i64( , + , i64, i32); @@ -1824,6 +1905,7 @@ define @intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vsmul.nxv1i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1864,6 +1946,7 @@ entry: declare @llvm.riscv.vsmul.nxv2i64.i64( , + , i64, i32); @@ -1881,6 +1964,7 @@ define @intrinsic_vsmul_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vsmul.nxv2i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1921,6 +2005,7 @@ entry: declare @llvm.riscv.vsmul.nxv4i64.i64( , + , i64, i32); @@ -1938,6 +2023,7 @@ define 
@intrinsic_vsmul_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vsmul.nxv4i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1978,6 +2064,7 @@ entry: declare @llvm.riscv.vsmul.nxv8i64.i64( , + , i64, i32); @@ -1995,6 +2082,7 @@ define @intrinsic_vsmul_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vsmul.nxv8i64.i64( + undef, %0, i64 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll index 3a5eb3c..5380ff9 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll @@ -4,6 +4,7 @@ declare @llvm.riscv.vsmul.nxv1i8.nxv1i8( , , + , i64); define @intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { @@ -14,6 +15,7 @@ define @intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vsmul.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -49,6 +51,7 @@ entry: declare @llvm.riscv.vsmul.nxv2i8.nxv2i8( , , + , i64); define @intrinsic_vsmul_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { @@ -59,6 +62,7 @@ define @intrinsic_vsmul_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vsmul.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -94,6 +98,7 @@ entry: declare @llvm.riscv.vsmul.nxv4i8.nxv4i8( , , + , i64); define @intrinsic_vsmul_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { @@ -104,6 +109,7 @@ define @intrinsic_vsmul_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vsmul.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -139,6 +145,7 @@ entry: declare @llvm.riscv.vsmul.nxv8i8.nxv8i8( , , + , i64); define @intrinsic_vsmul_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { @@ -149,6 +156,7 @@ define @intrinsic_vsmul_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vsmul.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -184,6 +192,7 @@ entry: declare @llvm.riscv.vsmul.nxv16i8.nxv16i8( , , + , i64); define @intrinsic_vsmul_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { @@ -194,6 +203,7 @@ define @intrinsic_vsmul_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vsmul.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -229,6 +239,7 @@ entry: declare @llvm.riscv.vsmul.nxv32i8.nxv32i8( , , + , i64); define @intrinsic_vsmul_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { @@ -239,6 +250,7 @@ define @intrinsic_vsmul_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vsmul.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -274,6 +286,7 @@ entry: declare @llvm.riscv.vsmul.nxv64i8.nxv64i8( , , + , i64); define @intrinsic_vsmul_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { @@ -284,6 +297,7 @@ define @intrinsic_vsmul_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vsmul.nxv64i8.nxv64i8( + undef, %0, %1, i64 %2) @@ -320,6 +334,7 @@ entry: declare @llvm.riscv.vsmul.nxv1i16.nxv1i16( , , + , i64); define @intrinsic_vsmul_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { @@ -330,6 +345,7 @@ define @intrinsic_vsmul_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vsmul.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -365,6 +381,7 @@ entry: declare @llvm.riscv.vsmul.nxv2i16.nxv2i16( , , + , i64); define @intrinsic_vsmul_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { @@ -375,6 +392,7 @@ define @intrinsic_vsmul_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vsmul.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -410,6 +428,7 @@ entry: declare @llvm.riscv.vsmul.nxv4i16.nxv4i16( , , + , i64); define @intrinsic_vsmul_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { @@ -420,6 +439,7 @@ define @intrinsic_vsmul_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vsmul.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -455,6 +475,7 @@ entry: declare @llvm.riscv.vsmul.nxv8i16.nxv8i16( , , + , i64); define 
@intrinsic_vsmul_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { @@ -465,6 +486,7 @@ define @intrinsic_vsmul_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vsmul.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vsmul.nxv16i16.nxv16i16( , , + , i64); define @intrinsic_vsmul_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { @@ -510,6 +533,7 @@ define @intrinsic_vsmul_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vsmul.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vsmul.nxv32i16.nxv32i16( , , + , i64); define @intrinsic_vsmul_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { @@ -555,6 +580,7 @@ define @intrinsic_vsmul_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vsmul.nxv32i16.nxv32i16( + undef, %0, %1, i64 %2) @@ -591,6 +617,7 @@ entry: declare @llvm.riscv.vsmul.nxv1i32.nxv1i32( , , + , i64); define @intrinsic_vsmul_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { @@ -601,6 +628,7 @@ define @intrinsic_vsmul_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vsmul.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -636,6 +664,7 @@ entry: declare @llvm.riscv.vsmul.nxv2i32.nxv2i32( , , + , i64); define @intrinsic_vsmul_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { @@ -646,6 +675,7 @@ define @intrinsic_vsmul_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vsmul.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vsmul.nxv4i32.nxv4i32( , , + , i64); define @intrinsic_vsmul_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { @@ -691,6 +722,7 @@ define @intrinsic_vsmul_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vsmul.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vsmul.nxv8i32.nxv8i32( , , + , i64); define @intrinsic_vsmul_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { @@ -736,6 +769,7 @@ define @intrinsic_vsmul_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vsmul.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vsmul.nxv16i32.nxv16i32( , , + , i64); define @intrinsic_vsmul_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { @@ -781,6 +816,7 @@ define @intrinsic_vsmul_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vsmul.nxv16i32.nxv16i32( + undef, %0, %1, i64 %2) @@ -817,6 +853,7 @@ entry: declare @llvm.riscv.vsmul.nxv1i64.nxv1i64( , , + , i64); define @intrinsic_vsmul_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { @@ -827,6 +864,7 @@ define @intrinsic_vsmul_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vsmul.nxv1i64.nxv1i64( + undef, %0, %1, i64 %2) @@ -862,6 +900,7 @@ entry: declare @llvm.riscv.vsmul.nxv2i64.nxv2i64( , , + , i64); define @intrinsic_vsmul_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { @@ -872,6 +911,7 @@ define @intrinsic_vsmul_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vsmul.nxv2i64.nxv2i64( + undef, %0, %1, i64 %2) @@ -907,6 +947,7 @@ entry: declare @llvm.riscv.vsmul.nxv4i64.nxv4i64( , , + , i64); define @intrinsic_vsmul_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { @@ -917,6 +958,7 @@ define @intrinsic_vsmul_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vsmul.nxv4i64.nxv4i64( + undef, %0, %1, i64 %2) @@ -952,6 +994,7 @@ entry: declare @llvm.riscv.vsmul.nxv8i64.nxv8i64( , , + , i64); define @intrinsic_vsmul_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { @@ -962,6 +1005,7 @@ define @intrinsic_vsmul_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vsmul.nxv8i64.nxv8i64( + undef, %0, %1, i64 %2) @@ -997,6 +1041,7 @@ entry: declare @llvm.riscv.vsmul.nxv1i8.i8( , + , i8, 
i64); @@ -1008,6 +1053,7 @@ define @intrinsic_vsmul_vx_nxv1i8_nxv1i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsmul.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1042,6 +1088,7 @@ entry: declare @llvm.riscv.vsmul.nxv2i8.i8( , + , i8, i64); @@ -1053,6 +1100,7 @@ define @intrinsic_vsmul_vx_nxv2i8_nxv2i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsmul.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1087,6 +1135,7 @@ entry: declare @llvm.riscv.vsmul.nxv4i8.i8( , + , i8, i64); @@ -1098,6 +1147,7 @@ define @intrinsic_vsmul_vx_nxv4i8_nxv4i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsmul.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1132,6 +1182,7 @@ entry: declare @llvm.riscv.vsmul.nxv8i8.i8( , + , i8, i64); @@ -1143,6 +1194,7 @@ define @intrinsic_vsmul_vx_nxv8i8_nxv8i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsmul.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1177,6 +1229,7 @@ entry: declare @llvm.riscv.vsmul.nxv16i8.i8( , + , i8, i64); @@ -1188,6 +1241,7 @@ define @intrinsic_vsmul_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vsmul.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1222,6 +1276,7 @@ entry: declare @llvm.riscv.vsmul.nxv32i8.i8( , + , i8, i64); @@ -1233,6 +1288,7 @@ define @intrinsic_vsmul_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vsmul.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1267,6 +1323,7 @@ entry: declare @llvm.riscv.vsmul.nxv64i8.i8( , + , i8, i64); @@ -1278,6 +1335,7 @@ define @intrinsic_vsmul_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vsmul.nxv64i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1312,6 +1370,7 @@ entry: declare @llvm.riscv.vsmul.nxv1i16.i16( , + , i16, i64); @@ -1323,6 +1382,7 @@ define @intrinsic_vsmul_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vsmul.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1357,6 +1417,7 @@ entry: declare @llvm.riscv.vsmul.nxv2i16.i16( , + , i16, i64); @@ -1368,6 +1429,7 @@ define @intrinsic_vsmul_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vsmul.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1402,6 +1464,7 @@ entry: declare @llvm.riscv.vsmul.nxv4i16.i16( , + , i16, i64); @@ -1413,6 +1476,7 @@ define @intrinsic_vsmul_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vsmul.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1447,6 +1511,7 @@ entry: declare @llvm.riscv.vsmul.nxv8i16.i16( , + , i16, i64); @@ -1458,6 +1523,7 @@ define @intrinsic_vsmul_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vsmul.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1492,6 +1558,7 @@ entry: declare @llvm.riscv.vsmul.nxv16i16.i16( , + , i16, i64); @@ -1503,6 +1570,7 @@ define @intrinsic_vsmul_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vsmul.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1537,6 +1605,7 @@ entry: declare @llvm.riscv.vsmul.nxv32i16.i16( , + , i16, i64); @@ -1548,6 +1617,7 @@ define @intrinsic_vsmul_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vsmul.nxv32i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1582,6 +1652,7 @@ entry: declare @llvm.riscv.vsmul.nxv1i32.i32( , + , i32, i64); @@ -1593,6 +1664,7 @@ define @intrinsic_vsmul_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vsmul.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1627,6 +1699,7 @@ entry: declare @llvm.riscv.vsmul.nxv2i32.i32( , + , i32, i64); @@ -1638,6 +1711,7 @@ define @intrinsic_vsmul_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vsmul.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1672,6 +1746,7 @@ entry: declare @llvm.riscv.vsmul.nxv4i32.i32( , + , i32, i64); @@ -1683,6 +1758,7 @@ define @intrinsic_vsmul_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vsmul.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1717,6 +1793,7 @@ entry: declare @llvm.riscv.vsmul.nxv8i32.i32( , + , i32, i64); @@ 
-1728,6 +1805,7 @@ define @intrinsic_vsmul_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vsmul.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1762,6 +1840,7 @@ entry: declare @llvm.riscv.vsmul.nxv16i32.i32( , + , i32, i64); @@ -1773,6 +1852,7 @@ define @intrinsic_vsmul_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vsmul.nxv16i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1807,6 +1887,7 @@ entry: declare @llvm.riscv.vsmul.nxv1i64.i64( , + , i64, i64); @@ -1818,6 +1899,7 @@ define @intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vsmul.nxv1i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1852,6 +1934,7 @@ entry: declare @llvm.riscv.vsmul.nxv2i64.i64( , + , i64, i64); @@ -1863,6 +1946,7 @@ define @intrinsic_vsmul_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vsmul.nxv2i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1897,6 +1981,7 @@ entry: declare @llvm.riscv.vsmul.nxv4i64.i64( , + , i64, i64); @@ -1908,6 +1993,7 @@ define @intrinsic_vsmul_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vsmul.nxv4i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1942,6 +2028,7 @@ entry: declare @llvm.riscv.vsmul.nxv8i64.i64( , + , i64, i64); @@ -1953,6 +2040,7 @@ define @intrinsic_vsmul_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vsmul.nxv8i64.i64( + undef, %0, i64 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll index 78eac5f..24f8d8a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll @@ -4,6 +4,7 @@ declare @llvm.riscv.vsra.nxv1i8.nxv1i8( , , + , i32); define @intrinsic_vsra_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { @@ -14,6 +15,7 @@ define @intrinsic_vsra_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vsra.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -49,6 +51,7 @@ entry: declare @llvm.riscv.vsra.nxv2i8.nxv2i8( , , + , i32); define @intrinsic_vsra_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { @@ -59,6 +62,7 @@ define @intrinsic_vsra_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vsra.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -94,6 +98,7 @@ entry: declare @llvm.riscv.vsra.nxv4i8.nxv4i8( , , + , i32); define @intrinsic_vsra_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { @@ -104,6 +109,7 @@ define @intrinsic_vsra_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vsra.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -139,6 +145,7 @@ entry: declare @llvm.riscv.vsra.nxv8i8.nxv8i8( , , + , i32); define @intrinsic_vsra_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { @@ -149,6 +156,7 @@ define @intrinsic_vsra_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vsra.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -184,6 +192,7 @@ entry: declare @llvm.riscv.vsra.nxv16i8.nxv16i8( , , + , i32); define @intrinsic_vsra_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { @@ -194,6 +203,7 @@ define @intrinsic_vsra_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vsra.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -229,6 +239,7 @@ entry: declare @llvm.riscv.vsra.nxv32i8.nxv32i8( , , + , i32); define @intrinsic_vsra_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { @@ -239,6 +250,7 @@ define @intrinsic_vsra_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vsra.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -274,6 +286,7 @@ entry: declare @llvm.riscv.vsra.nxv64i8.nxv64i8( , , + , i32); define @intrinsic_vsra_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { @@ -284,6 +297,7 @@ define @intrinsic_vsra_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vsra.nxv64i8.nxv64i8( + undef, %0, %1, i32 %2) @@ -320,6 +334,7 @@ entry: declare @llvm.riscv.vsra.nxv1i16.nxv1i16( , , + , i32); define @intrinsic_vsra_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) 
nounwind { @@ -330,6 +345,7 @@ define @intrinsic_vsra_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vsra.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -365,6 +381,7 @@ entry: declare @llvm.riscv.vsra.nxv2i16.nxv2i16( , , + , i32); define @intrinsic_vsra_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { @@ -375,6 +392,7 @@ define @intrinsic_vsra_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vsra.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -410,6 +428,7 @@ entry: declare @llvm.riscv.vsra.nxv4i16.nxv4i16( , , + , i32); define @intrinsic_vsra_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { @@ -420,6 +439,7 @@ define @intrinsic_vsra_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vsra.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -455,6 +475,7 @@ entry: declare @llvm.riscv.vsra.nxv8i16.nxv8i16( , , + , i32); define @intrinsic_vsra_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { @@ -465,6 +486,7 @@ define @intrinsic_vsra_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vsra.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vsra.nxv16i16.nxv16i16( , , + , i32); define @intrinsic_vsra_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { @@ -510,6 +533,7 @@ define @intrinsic_vsra_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vsra.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vsra.nxv32i16.nxv32i16( , , + , i32); define @intrinsic_vsra_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { @@ -555,6 +580,7 @@ define @intrinsic_vsra_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vsra.nxv32i16.nxv32i16( + undef, %0, %1, i32 %2) @@ -591,6 +617,7 @@ entry: declare @llvm.riscv.vsra.nxv1i32.nxv1i32( , , + , i32); define @intrinsic_vsra_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { @@ -601,6 +628,7 @@ define @intrinsic_vsra_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vsra.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -636,6 +664,7 @@ entry: declare @llvm.riscv.vsra.nxv2i32.nxv2i32( , , + , i32); define @intrinsic_vsra_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { @@ -646,6 +675,7 @@ define @intrinsic_vsra_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vsra.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vsra.nxv4i32.nxv4i32( , , + , i32); define @intrinsic_vsra_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { @@ -691,6 +722,7 @@ define @intrinsic_vsra_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vsra.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vsra.nxv8i32.nxv8i32( , , + , i32); define @intrinsic_vsra_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { @@ -736,6 +769,7 @@ define @intrinsic_vsra_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vsra.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vsra.nxv16i32.nxv16i32( , , + , i32); define @intrinsic_vsra_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { @@ -781,6 +816,7 @@ define @intrinsic_vsra_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vsra.nxv16i32.nxv16i32( + undef, %0, %1, i32 %2) @@ -817,6 +853,7 @@ entry: declare @llvm.riscv.vsra.nxv1i64.nxv1i64( , , + , i32); define @intrinsic_vsra_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { @@ -827,6 +864,7 @@ define @intrinsic_vsra_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vsra.nxv1i64.nxv1i64( + undef, %0, %1, i32 %2) @@ -862,6 +900,7 @@ entry: declare @llvm.riscv.vsra.nxv2i64.nxv2i64( , , + , i32); define @intrinsic_vsra_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { @@ -872,6 +911,7 @@ 
define @intrinsic_vsra_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vsra.nxv2i64.nxv2i64( + undef, %0, %1, i32 %2) @@ -907,6 +947,7 @@ entry: declare @llvm.riscv.vsra.nxv4i64.nxv4i64( , , + , i32); define @intrinsic_vsra_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { @@ -917,6 +958,7 @@ define @intrinsic_vsra_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vsra.nxv4i64.nxv4i64( + undef, %0, %1, i32 %2) @@ -952,6 +994,7 @@ entry: declare @llvm.riscv.vsra.nxv8i64.nxv8i64( , , + , i32); define @intrinsic_vsra_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { @@ -962,6 +1005,7 @@ define @intrinsic_vsra_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vsra.nxv8i64.nxv8i64( + undef, %0, %1, i32 %2) @@ -997,6 +1041,7 @@ entry: declare @llvm.riscv.vsra.nxv1i8( , + , i32, i32); @@ -1008,6 +1053,7 @@ define @intrinsic_vsra_vx_nxv1i8_nxv1i8( %0, ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsra.nxv1i8( + undef, %0, i32 %1, i32 %2) @@ -1042,6 +1088,7 @@ entry: declare @llvm.riscv.vsra.nxv2i8( , + , i32, i32); @@ -1053,6 +1100,7 @@ define @intrinsic_vsra_vx_nxv2i8_nxv2i8( %0, ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsra.nxv2i8( + undef, %0, i32 %1, i32 %2) @@ -1087,6 +1135,7 @@ entry: declare @llvm.riscv.vsra.nxv4i8( , + , i32, i32); @@ -1098,6 +1147,7 @@ define @intrinsic_vsra_vx_nxv4i8_nxv4i8( %0, ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsra.nxv4i8( + undef, %0, i32 %1, i32 %2) @@ -1132,6 +1182,7 @@ entry: declare @llvm.riscv.vsra.nxv8i8( , + , i32, i32); @@ -1143,6 +1194,7 @@ define @intrinsic_vsra_vx_nxv8i8_nxv8i8( %0, ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsra.nxv8i8( + undef, %0, i32 %1, i32 %2) @@ -1177,6 +1229,7 @@ entry: declare @llvm.riscv.vsra.nxv16i8( , + , i32, i32); @@ -1188,6 +1241,7 @@ define @intrinsic_vsra_vx_nxv16i8_nxv16i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsra.nxv16i8( + undef, %0, i32 %1, i32 %2) @@ -1222,6 +1276,7 @@ entry: declare @llvm.riscv.vsra.nxv32i8( , + , i32, i32); @@ -1233,6 +1288,7 @@ define @intrinsic_vsra_vx_nxv32i8_nxv32i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsra.nxv32i8( + undef, %0, i32 %1, i32 %2) @@ -1267,6 +1323,7 @@ entry: declare @llvm.riscv.vsra.nxv64i8( , + , i32, i32); @@ -1278,6 +1335,7 @@ define @intrinsic_vsra_vx_nxv64i8_nxv64i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsra.nxv64i8( + undef, %0, i32 %1, i32 %2) @@ -1312,6 +1370,7 @@ entry: declare @llvm.riscv.vsra.nxv1i16( , + , i32, i32); @@ -1323,6 +1382,7 @@ define @intrinsic_vsra_vx_nxv1i16_nxv1i16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsra.nxv1i16( + undef, %0, i32 %1, i32 %2) @@ -1357,6 +1417,7 @@ entry: declare @llvm.riscv.vsra.nxv2i16( , + , i32, i32); @@ -1368,6 +1429,7 @@ define @intrinsic_vsra_vx_nxv2i16_nxv2i16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsra.nxv2i16( + undef, %0, i32 %1, i32 %2) @@ -1402,6 +1464,7 @@ entry: declare @llvm.riscv.vsra.nxv4i16( , + , i32, i32); @@ -1413,6 +1476,7 @@ define @intrinsic_vsra_vx_nxv4i16_nxv4i16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsra.nxv4i16( + undef, %0, i32 %1, i32 %2) @@ -1447,6 +1511,7 @@ entry: declare @llvm.riscv.vsra.nxv8i16( , + , i32, i32); @@ -1458,6 +1523,7 @@ define @intrinsic_vsra_vx_nxv8i16_nxv8i16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsra.nxv8i16( + undef, %0, i32 %1, i32 %2) @@ -1492,6 +1558,7 @@ entry: declare @llvm.riscv.vsra.nxv16i16( , + , i32, i32); @@ -1503,6 +1570,7 @@ define @intrinsic_vsra_vx_nxv16i16_nxv16i16( @llvm.riscv.vsra.nxv16i16( + undef, %0, i32 %1, i32 %2) @@ -1537,6 +1605,7 @@ entry: declare @llvm.riscv.vsra.nxv32i16( , + , 
i32, i32); @@ -1548,6 +1617,7 @@ define @intrinsic_vsra_vx_nxv32i16_nxv32i16( @llvm.riscv.vsra.nxv32i16( + undef, %0, i32 %1, i32 %2) @@ -1582,6 +1652,7 @@ entry: declare @llvm.riscv.vsra.nxv1i32( , + , i32, i32); @@ -1593,6 +1664,7 @@ define @intrinsic_vsra_vx_nxv1i32_nxv1i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsra.nxv1i32( + undef, %0, i32 %1, i32 %2) @@ -1627,6 +1699,7 @@ entry: declare @llvm.riscv.vsra.nxv2i32( , + , i32, i32); @@ -1638,6 +1711,7 @@ define @intrinsic_vsra_vx_nxv2i32_nxv2i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsra.nxv2i32( + undef, %0, i32 %1, i32 %2) @@ -1672,6 +1746,7 @@ entry: declare @llvm.riscv.vsra.nxv4i32( , + , i32, i32); @@ -1683,6 +1758,7 @@ define @intrinsic_vsra_vx_nxv4i32_nxv4i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsra.nxv4i32( + undef, %0, i32 %1, i32 %2) @@ -1717,6 +1793,7 @@ entry: declare @llvm.riscv.vsra.nxv8i32( , + , i32, i32); @@ -1728,6 +1805,7 @@ define @intrinsic_vsra_vx_nxv8i32_nxv8i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsra.nxv8i32( + undef, %0, i32 %1, i32 %2) @@ -1762,6 +1840,7 @@ entry: declare @llvm.riscv.vsra.nxv16i32( , + , i32, i32); @@ -1773,6 +1852,7 @@ define @intrinsic_vsra_vx_nxv16i32_nxv16i32( @llvm.riscv.vsra.nxv16i32( + undef, %0, i32 %1, i32 %2) @@ -1807,6 +1887,7 @@ entry: declare @llvm.riscv.vsra.nxv1i64( , + , i32, i32); @@ -1818,6 +1899,7 @@ define @intrinsic_vsra_vx_nxv1i64_nxv1i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsra.nxv1i64( + undef, %0, i32 %1, i32 %2) @@ -1852,6 +1934,7 @@ entry: declare @llvm.riscv.vsra.nxv2i64( , + , i32, i32); @@ -1863,6 +1946,7 @@ define @intrinsic_vsra_vx_nxv2i64_nxv2i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsra.nxv2i64( + undef, %0, i32 %1, i32 %2) @@ -1897,6 +1981,7 @@ entry: declare @llvm.riscv.vsra.nxv4i64( , + , i32, i32); @@ -1908,6 +1993,7 @@ define @intrinsic_vsra_vx_nxv4i64_nxv4i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsra.nxv4i64( + undef, %0, i32 %1, i32 %2) @@ -1942,6 +2028,7 @@ entry: declare @llvm.riscv.vsra.nxv8i64( , + , i32, i32); @@ -1953,6 +2040,7 @@ define @intrinsic_vsra_vx_nxv8i64_nxv8i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsra.nxv8i64( + undef, %0, i32 %1, i32 %2) @@ -1993,6 +2081,7 @@ define @intrinsic_vsra_vi_nxv1i8_nxv1i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsra.nxv1i8( + undef, %0, i32 9, i32 %1) @@ -2025,6 +2114,7 @@ define @intrinsic_vsra_vi_nxv2i8_nxv2i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsra.nxv2i8( + undef, %0, i32 9, i32 %1) @@ -2057,6 +2147,7 @@ define @intrinsic_vsra_vi_nxv4i8_nxv4i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsra.nxv4i8( + undef, %0, i32 9, i32 %1) @@ -2089,6 +2180,7 @@ define @intrinsic_vsra_vi_nxv8i8_nxv8i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsra.nxv8i8( + undef, %0, i32 9, i32 %1) @@ -2121,6 +2213,7 @@ define @intrinsic_vsra_vi_nxv16i8_nxv16i8_i8( @llvm.riscv.vsra.nxv16i8( + undef, %0, i32 9, i32 %1) @@ -2153,6 +2246,7 @@ define @intrinsic_vsra_vi_nxv32i8_nxv32i8_i8( @llvm.riscv.vsra.nxv32i8( + undef, %0, i32 9, i32 %1) @@ -2185,6 +2279,7 @@ define @intrinsic_vsra_vi_nxv64i8_nxv64i8_i8( @llvm.riscv.vsra.nxv64i8( + undef, %0, i32 9, i32 %1) @@ -2217,6 +2312,7 @@ define @intrinsic_vsra_vi_nxv1i16_nxv1i16_i16( @llvm.riscv.vsra.nxv1i16( + undef, %0, i32 9, i32 %1) @@ -2249,6 +2345,7 @@ define @intrinsic_vsra_vi_nxv2i16_nxv2i16_i16( @llvm.riscv.vsra.nxv2i16( + undef, %0, i32 9, i32 %1) @@ -2281,6 +2378,7 @@ define @intrinsic_vsra_vi_nxv4i16_nxv4i16_i16( @llvm.riscv.vsra.nxv4i16( + 
undef, %0, i32 9, i32 %1) @@ -2313,6 +2411,7 @@ define @intrinsic_vsra_vi_nxv8i16_nxv8i16_i16( @llvm.riscv.vsra.nxv8i16( + undef, %0, i32 9, i32 %1) @@ -2345,6 +2444,7 @@ define @intrinsic_vsra_vi_nxv16i16_nxv16i16_i16( @llvm.riscv.vsra.nxv16i16( + undef, %0, i32 9, i32 %1) @@ -2377,6 +2477,7 @@ define @intrinsic_vsra_vi_nxv32i16_nxv32i16_i16( @llvm.riscv.vsra.nxv32i16( + undef, %0, i32 9, i32 %1) @@ -2409,6 +2510,7 @@ define @intrinsic_vsra_vi_nxv1i32_nxv1i32_i32( @llvm.riscv.vsra.nxv1i32( + undef, %0, i32 9, i32 %1) @@ -2441,6 +2543,7 @@ define @intrinsic_vsra_vi_nxv2i32_nxv2i32_i32( @llvm.riscv.vsra.nxv2i32( + undef, %0, i32 9, i32 %1) @@ -2473,6 +2576,7 @@ define @intrinsic_vsra_vi_nxv4i32_nxv4i32_i32( @llvm.riscv.vsra.nxv4i32( + undef, %0, i32 9, i32 %1) @@ -2505,6 +2609,7 @@ define @intrinsic_vsra_vi_nxv8i32_nxv8i32_i32( @llvm.riscv.vsra.nxv8i32( + undef, %0, i32 9, i32 %1) @@ -2537,6 +2642,7 @@ define @intrinsic_vsra_vi_nxv16i32_nxv16i32_i32( @llvm.riscv.vsra.nxv16i32( + undef, %0, i32 9, i32 %1) @@ -2569,6 +2675,7 @@ define @intrinsic_vsra_vi_nxv1i64_nxv1i64_i64( @llvm.riscv.vsra.nxv1i64( + undef, %0, i32 9, i32 %1) @@ -2601,6 +2708,7 @@ define @intrinsic_vsra_vi_nxv2i64_nxv2i64_i64( @llvm.riscv.vsra.nxv2i64( + undef, %0, i32 9, i32 %1) @@ -2633,6 +2741,7 @@ define @intrinsic_vsra_vi_nxv4i64_nxv4i64_i64( @llvm.riscv.vsra.nxv4i64( + undef, %0, i32 9, i32 %1) @@ -2665,6 +2774,7 @@ define @intrinsic_vsra_vi_nxv8i64_nxv8i64_i64( @llvm.riscv.vsra.nxv8i64( + undef, %0, i32 9, i32 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll index 4841ca0..435d59d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll @@ -4,6 +4,7 @@ declare @llvm.riscv.vsra.nxv1i8.nxv1i8( , , + , i64); define @intrinsic_vsra_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { @@ -14,6 +15,7 @@ define @intrinsic_vsra_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vsra.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -49,6 +51,7 @@ entry: declare @llvm.riscv.vsra.nxv2i8.nxv2i8( , , + , i64); define @intrinsic_vsra_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { @@ -59,6 +62,7 @@ define @intrinsic_vsra_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vsra.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -94,6 +98,7 @@ entry: declare @llvm.riscv.vsra.nxv4i8.nxv4i8( , , + , i64); define @intrinsic_vsra_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { @@ -104,6 +109,7 @@ define @intrinsic_vsra_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vsra.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -139,6 +145,7 @@ entry: declare @llvm.riscv.vsra.nxv8i8.nxv8i8( , , + , i64); define @intrinsic_vsra_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { @@ -149,6 +156,7 @@ define @intrinsic_vsra_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vsra.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -184,6 +192,7 @@ entry: declare @llvm.riscv.vsra.nxv16i8.nxv16i8( , , + , i64); define @intrinsic_vsra_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { @@ -194,6 +203,7 @@ define @intrinsic_vsra_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vsra.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -229,6 +239,7 @@ entry: declare @llvm.riscv.vsra.nxv32i8.nxv32i8( , , + , i64); define @intrinsic_vsra_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { @@ -239,6 +250,7 @@ define @intrinsic_vsra_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vsra.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -274,6 +286,7 @@ entry: declare @llvm.riscv.vsra.nxv64i8.nxv64i8( , , + , i64); define @intrinsic_vsra_vv_nxv64i8_nxv64i8_nxv64i8( 
%0, %1, i64 %2) nounwind { @@ -284,6 +297,7 @@ define @intrinsic_vsra_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vsra.nxv64i8.nxv64i8( + undef, %0, %1, i64 %2) @@ -320,6 +334,7 @@ entry: declare @llvm.riscv.vsra.nxv1i16.nxv1i16( , , + , i64); define @intrinsic_vsra_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { @@ -330,6 +345,7 @@ define @intrinsic_vsra_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vsra.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -365,6 +381,7 @@ entry: declare @llvm.riscv.vsra.nxv2i16.nxv2i16( , , + , i64); define @intrinsic_vsra_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { @@ -375,6 +392,7 @@ define @intrinsic_vsra_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vsra.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -410,6 +428,7 @@ entry: declare @llvm.riscv.vsra.nxv4i16.nxv4i16( , , + , i64); define @intrinsic_vsra_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { @@ -420,6 +439,7 @@ define @intrinsic_vsra_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vsra.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -455,6 +475,7 @@ entry: declare @llvm.riscv.vsra.nxv8i16.nxv8i16( , , + , i64); define @intrinsic_vsra_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { @@ -465,6 +486,7 @@ define @intrinsic_vsra_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vsra.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vsra.nxv16i16.nxv16i16( , , + , i64); define @intrinsic_vsra_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { @@ -510,6 +533,7 @@ define @intrinsic_vsra_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vsra.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vsra.nxv32i16.nxv32i16( , , + , i64); define @intrinsic_vsra_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { @@ -555,6 +580,7 @@ define @intrinsic_vsra_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vsra.nxv32i16.nxv32i16( + undef, %0, %1, i64 %2) @@ -591,6 +617,7 @@ entry: declare @llvm.riscv.vsra.nxv1i32.nxv1i32( , , + , i64); define @intrinsic_vsra_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { @@ -601,6 +628,7 @@ define @intrinsic_vsra_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vsra.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -636,6 +664,7 @@ entry: declare @llvm.riscv.vsra.nxv2i32.nxv2i32( , , + , i64); define @intrinsic_vsra_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { @@ -646,6 +675,7 @@ define @intrinsic_vsra_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vsra.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vsra.nxv4i32.nxv4i32( , , + , i64); define @intrinsic_vsra_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { @@ -691,6 +722,7 @@ define @intrinsic_vsra_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vsra.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vsra.nxv8i32.nxv8i32( , , + , i64); define @intrinsic_vsra_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { @@ -736,6 +769,7 @@ define @intrinsic_vsra_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vsra.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vsra.nxv16i32.nxv16i32( , , + , i64); define @intrinsic_vsra_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { @@ -781,6 +816,7 @@ define @intrinsic_vsra_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vsra.nxv16i32.nxv16i32( + undef, %0, %1, i64 %2) @@ -817,6 +853,7 @@ entry: declare @llvm.riscv.vsra.nxv1i64.nxv1i64( , , + , i64); define @intrinsic_vsra_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { @@ 
-827,6 +864,7 @@ define @intrinsic_vsra_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vsra.nxv1i64.nxv1i64( + undef, %0, %1, i64 %2) @@ -862,6 +900,7 @@ entry: declare @llvm.riscv.vsra.nxv2i64.nxv2i64( , , + , i64); define @intrinsic_vsra_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { @@ -872,6 +911,7 @@ define @intrinsic_vsra_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vsra.nxv2i64.nxv2i64( + undef, %0, %1, i64 %2) @@ -907,6 +947,7 @@ entry: declare @llvm.riscv.vsra.nxv4i64.nxv4i64( , , + , i64); define @intrinsic_vsra_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { @@ -917,6 +958,7 @@ define @intrinsic_vsra_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vsra.nxv4i64.nxv4i64( + undef, %0, %1, i64 %2) @@ -952,6 +994,7 @@ entry: declare @llvm.riscv.vsra.nxv8i64.nxv8i64( , , + , i64); define @intrinsic_vsra_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { @@ -962,6 +1005,7 @@ define @intrinsic_vsra_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vsra.nxv8i64.nxv8i64( + undef, %0, %1, i64 %2) @@ -997,6 +1041,7 @@ entry: declare @llvm.riscv.vsra.nxv1i8( , + , i64, i64); @@ -1008,6 +1053,7 @@ define @intrinsic_vsra_vx_nxv1i8_nxv1i8( %0, ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsra.nxv1i8( + undef, %0, i64 %1, i64 %2) @@ -1042,6 +1088,7 @@ entry: declare @llvm.riscv.vsra.nxv2i8( , + , i64, i64); @@ -1053,6 +1100,7 @@ define @intrinsic_vsra_vx_nxv2i8_nxv2i8( %0, ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsra.nxv2i8( + undef, %0, i64 %1, i64 %2) @@ -1087,6 +1135,7 @@ entry: declare @llvm.riscv.vsra.nxv4i8( , + , i64, i64); @@ -1098,6 +1147,7 @@ define @intrinsic_vsra_vx_nxv4i8_nxv4i8( %0, ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsra.nxv4i8( + undef, %0, i64 %1, i64 %2) @@ -1132,6 +1182,7 @@ entry: declare @llvm.riscv.vsra.nxv8i8( , + , i64, i64); @@ -1143,6 +1194,7 @@ define @intrinsic_vsra_vx_nxv8i8_nxv8i8( %0, ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsra.nxv8i8( + undef, %0, i64 %1, i64 %2) @@ -1177,6 +1229,7 @@ entry: declare @llvm.riscv.vsra.nxv16i8( , + , i64, i64); @@ -1188,6 +1241,7 @@ define @intrinsic_vsra_vx_nxv16i8_nxv16i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsra.nxv16i8( + undef, %0, i64 %1, i64 %2) @@ -1222,6 +1276,7 @@ entry: declare @llvm.riscv.vsra.nxv32i8( , + , i64, i64); @@ -1233,6 +1288,7 @@ define @intrinsic_vsra_vx_nxv32i8_nxv32i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsra.nxv32i8( + undef, %0, i64 %1, i64 %2) @@ -1267,6 +1323,7 @@ entry: declare @llvm.riscv.vsra.nxv64i8( , + , i64, i64); @@ -1278,6 +1335,7 @@ define @intrinsic_vsra_vx_nxv64i8_nxv64i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsra.nxv64i8( + undef, %0, i64 %1, i64 %2) @@ -1312,6 +1370,7 @@ entry: declare @llvm.riscv.vsra.nxv1i16( , + , i64, i64); @@ -1323,6 +1382,7 @@ define @intrinsic_vsra_vx_nxv1i16_nxv1i16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsra.nxv1i16( + undef, %0, i64 %1, i64 %2) @@ -1357,6 +1417,7 @@ entry: declare @llvm.riscv.vsra.nxv2i16( , + , i64, i64); @@ -1368,6 +1429,7 @@ define @intrinsic_vsra_vx_nxv2i16_nxv2i16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsra.nxv2i16( + undef, %0, i64 %1, i64 %2) @@ -1402,6 +1464,7 @@ entry: declare @llvm.riscv.vsra.nxv4i16( , + , i64, i64); @@ -1413,6 +1476,7 @@ define @intrinsic_vsra_vx_nxv4i16_nxv4i16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsra.nxv4i16( + undef, %0, i64 %1, i64 %2) @@ -1447,6 +1511,7 @@ entry: declare @llvm.riscv.vsra.nxv8i16( , + , i64, i64); @@ -1458,6 +1523,7 @@ define @intrinsic_vsra_vx_nxv8i16_nxv8i16( ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vsra.nxv8i16( + undef, %0, i64 %1, i64 %2) @@ -1492,6 +1558,7 @@ entry: declare @llvm.riscv.vsra.nxv16i16( , + , i64, i64); @@ -1503,6 +1570,7 @@ define @intrinsic_vsra_vx_nxv16i16_nxv16i16( @llvm.riscv.vsra.nxv16i16( + undef, %0, i64 %1, i64 %2) @@ -1537,6 +1605,7 @@ entry: declare @llvm.riscv.vsra.nxv32i16( , + , i64, i64); @@ -1548,6 +1617,7 @@ define @intrinsic_vsra_vx_nxv32i16_nxv32i16( @llvm.riscv.vsra.nxv32i16( + undef, %0, i64 %1, i64 %2) @@ -1582,6 +1652,7 @@ entry: declare @llvm.riscv.vsra.nxv1i32( , + , i64, i64); @@ -1593,6 +1664,7 @@ define @intrinsic_vsra_vx_nxv1i32_nxv1i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsra.nxv1i32( + undef, %0, i64 %1, i64 %2) @@ -1627,6 +1699,7 @@ entry: declare @llvm.riscv.vsra.nxv2i32( , + , i64, i64); @@ -1638,6 +1711,7 @@ define @intrinsic_vsra_vx_nxv2i32_nxv2i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsra.nxv2i32( + undef, %0, i64 %1, i64 %2) @@ -1672,6 +1746,7 @@ entry: declare @llvm.riscv.vsra.nxv4i32( , + , i64, i64); @@ -1683,6 +1758,7 @@ define @intrinsic_vsra_vx_nxv4i32_nxv4i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsra.nxv4i32( + undef, %0, i64 %1, i64 %2) @@ -1717,6 +1793,7 @@ entry: declare @llvm.riscv.vsra.nxv8i32( , + , i64, i64); @@ -1728,6 +1805,7 @@ define @intrinsic_vsra_vx_nxv8i32_nxv8i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsra.nxv8i32( + undef, %0, i64 %1, i64 %2) @@ -1762,6 +1840,7 @@ entry: declare @llvm.riscv.vsra.nxv16i32( , + , i64, i64); @@ -1773,6 +1852,7 @@ define @intrinsic_vsra_vx_nxv16i32_nxv16i32( @llvm.riscv.vsra.nxv16i32( + undef, %0, i64 %1, i64 %2) @@ -1807,6 +1887,7 @@ entry: declare @llvm.riscv.vsra.nxv1i64( , + , i64, i64); @@ -1818,6 +1899,7 @@ define @intrinsic_vsra_vx_nxv1i64_nxv1i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsra.nxv1i64( + undef, %0, i64 %1, i64 %2) @@ -1852,6 +1934,7 @@ entry: declare @llvm.riscv.vsra.nxv2i64( , + , i64, i64); @@ -1863,6 +1946,7 @@ define @intrinsic_vsra_vx_nxv2i64_nxv2i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsra.nxv2i64( + undef, %0, i64 %1, i64 %2) @@ -1897,6 +1981,7 @@ entry: declare @llvm.riscv.vsra.nxv4i64( , + , i64, i64); @@ -1908,6 +1993,7 @@ define @intrinsic_vsra_vx_nxv4i64_nxv4i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsra.nxv4i64( + undef, %0, i64 %1, i64 %2) @@ -1942,6 +2028,7 @@ entry: declare @llvm.riscv.vsra.nxv8i64( , + , i64, i64); @@ -1953,6 +2040,7 @@ define @intrinsic_vsra_vx_nxv8i64_nxv8i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsra.nxv8i64( + undef, %0, i64 %1, i64 %2) @@ -1993,6 +2081,7 @@ define @intrinsic_vsra_vi_nxv1i8_nxv1i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsra.nxv1i8( + undef, %0, i64 9, i64 %1) @@ -2025,6 +2114,7 @@ define @intrinsic_vsra_vi_nxv2i8_nxv2i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsra.nxv2i8( + undef, %0, i64 9, i64 %1) @@ -2057,6 +2147,7 @@ define @intrinsic_vsra_vi_nxv4i8_nxv4i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsra.nxv4i8( + undef, %0, i64 9, i64 %1) @@ -2089,6 +2180,7 @@ define @intrinsic_vsra_vi_nxv8i8_nxv8i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsra.nxv8i8( + undef, %0, i64 9, i64 %1) @@ -2121,6 +2213,7 @@ define @intrinsic_vsra_vi_nxv16i8_nxv16i8_i8( @llvm.riscv.vsra.nxv16i8( + undef, %0, i64 9, i64 %1) @@ -2153,6 +2246,7 @@ define @intrinsic_vsra_vi_nxv32i8_nxv32i8_i8( @llvm.riscv.vsra.nxv32i8( + undef, %0, i64 9, i64 %1) @@ -2185,6 +2279,7 @@ define @intrinsic_vsra_vi_nxv64i8_nxv64i8_i8( @llvm.riscv.vsra.nxv64i8( + undef, %0, i64 9, i64 %1) @@ -2217,6 
+2312,7 @@ define @intrinsic_vsra_vi_nxv1i16_nxv1i16_i16( @llvm.riscv.vsra.nxv1i16( + undef, %0, i64 9, i64 %1) @@ -2249,6 +2345,7 @@ define @intrinsic_vsra_vi_nxv2i16_nxv2i16_i16( @llvm.riscv.vsra.nxv2i16( + undef, %0, i64 9, i64 %1) @@ -2281,6 +2378,7 @@ define @intrinsic_vsra_vi_nxv4i16_nxv4i16_i16( @llvm.riscv.vsra.nxv4i16( + undef, %0, i64 9, i64 %1) @@ -2313,6 +2411,7 @@ define @intrinsic_vsra_vi_nxv8i16_nxv8i16_i16( @llvm.riscv.vsra.nxv8i16( + undef, %0, i64 9, i64 %1) @@ -2345,6 +2444,7 @@ define @intrinsic_vsra_vi_nxv16i16_nxv16i16_i16( @llvm.riscv.vsra.nxv16i16( + undef, %0, i64 9, i64 %1) @@ -2377,6 +2477,7 @@ define @intrinsic_vsra_vi_nxv32i16_nxv32i16_i16( @llvm.riscv.vsra.nxv32i16( + undef, %0, i64 9, i64 %1) @@ -2409,6 +2510,7 @@ define @intrinsic_vsra_vi_nxv1i32_nxv1i32_i32( @llvm.riscv.vsra.nxv1i32( + undef, %0, i64 9, i64 %1) @@ -2441,6 +2543,7 @@ define @intrinsic_vsra_vi_nxv2i32_nxv2i32_i32( @llvm.riscv.vsra.nxv2i32( + undef, %0, i64 9, i64 %1) @@ -2473,6 +2576,7 @@ define @intrinsic_vsra_vi_nxv4i32_nxv4i32_i32( @llvm.riscv.vsra.nxv4i32( + undef, %0, i64 9, i64 %1) @@ -2505,6 +2609,7 @@ define @intrinsic_vsra_vi_nxv8i32_nxv8i32_i32( @llvm.riscv.vsra.nxv8i32( + undef, %0, i64 9, i64 %1) @@ -2537,6 +2642,7 @@ define @intrinsic_vsra_vi_nxv16i32_nxv16i32_i32( @llvm.riscv.vsra.nxv16i32( + undef, %0, i64 9, i64 %1) @@ -2569,6 +2675,7 @@ define @intrinsic_vsra_vi_nxv1i64_nxv1i64_i64( @llvm.riscv.vsra.nxv1i64( + undef, %0, i64 9, i64 %1) @@ -2601,6 +2708,7 @@ define @intrinsic_vsra_vi_nxv2i64_nxv2i64_i64( @llvm.riscv.vsra.nxv2i64( + undef, %0, i64 9, i64 %1) @@ -2633,6 +2741,7 @@ define @intrinsic_vsra_vi_nxv4i64_nxv4i64_i64( @llvm.riscv.vsra.nxv4i64( + undef, %0, i64 9, i64 %1) @@ -2665,6 +2774,7 @@ define @intrinsic_vsra_vi_nxv8i64_nxv8i64_i64( @llvm.riscv.vsra.nxv8i64( + undef, %0, i64 9, i64 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll index 8ae5ec1..0d68ab0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll @@ -4,6 +4,7 @@ declare @llvm.riscv.vsrl.nxv1i8.nxv1i8( , , + , i32); define @intrinsic_vsrl_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { @@ -14,6 +15,7 @@ define @intrinsic_vsrl_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vsrl.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -49,6 +51,7 @@ entry: declare @llvm.riscv.vsrl.nxv2i8.nxv2i8( , , + , i32); define @intrinsic_vsrl_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { @@ -59,6 +62,7 @@ define @intrinsic_vsrl_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vsrl.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -94,6 +98,7 @@ entry: declare @llvm.riscv.vsrl.nxv4i8.nxv4i8( , , + , i32); define @intrinsic_vsrl_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { @@ -104,6 +109,7 @@ define @intrinsic_vsrl_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vsrl.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -139,6 +145,7 @@ entry: declare @llvm.riscv.vsrl.nxv8i8.nxv8i8( , , + , i32); define @intrinsic_vsrl_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { @@ -149,6 +156,7 @@ define @intrinsic_vsrl_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vsrl.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -184,6 +192,7 @@ entry: declare @llvm.riscv.vsrl.nxv16i8.nxv16i8( , , + , i32); define @intrinsic_vsrl_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { @@ -194,6 +203,7 @@ define @intrinsic_vsrl_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vsrl.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -229,6 +239,7 @@ entry: declare @llvm.riscv.vsrl.nxv32i8.nxv32i8( , , + , i32); define 
@intrinsic_vsrl_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { @@ -239,6 +250,7 @@ define @intrinsic_vsrl_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vsrl.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -274,6 +286,7 @@ entry: declare @llvm.riscv.vsrl.nxv64i8.nxv64i8( , , + , i32); define @intrinsic_vsrl_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { @@ -284,6 +297,7 @@ define @intrinsic_vsrl_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vsrl.nxv64i8.nxv64i8( + undef, %0, %1, i32 %2) @@ -320,6 +334,7 @@ entry: declare @llvm.riscv.vsrl.nxv1i16.nxv1i16( , , + , i32); define @intrinsic_vsrl_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { @@ -330,6 +345,7 @@ define @intrinsic_vsrl_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vsrl.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -365,6 +381,7 @@ entry: declare @llvm.riscv.vsrl.nxv2i16.nxv2i16( , , + , i32); define @intrinsic_vsrl_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { @@ -375,6 +392,7 @@ define @intrinsic_vsrl_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vsrl.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -410,6 +428,7 @@ entry: declare @llvm.riscv.vsrl.nxv4i16.nxv4i16( , , + , i32); define @intrinsic_vsrl_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { @@ -420,6 +439,7 @@ define @intrinsic_vsrl_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vsrl.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -455,6 +475,7 @@ entry: declare @llvm.riscv.vsrl.nxv8i16.nxv8i16( , , + , i32); define @intrinsic_vsrl_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { @@ -465,6 +486,7 @@ define @intrinsic_vsrl_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vsrl.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vsrl.nxv16i16.nxv16i16( , , + , i32); define @intrinsic_vsrl_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { @@ -510,6 +533,7 @@ define @intrinsic_vsrl_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vsrl.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vsrl.nxv32i16.nxv32i16( , , + , i32); define @intrinsic_vsrl_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { @@ -555,6 +580,7 @@ define @intrinsic_vsrl_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vsrl.nxv32i16.nxv32i16( + undef, %0, %1, i32 %2) @@ -591,6 +617,7 @@ entry: declare @llvm.riscv.vsrl.nxv1i32.nxv1i32( , , + , i32); define @intrinsic_vsrl_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { @@ -601,6 +628,7 @@ define @intrinsic_vsrl_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vsrl.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -636,6 +664,7 @@ entry: declare @llvm.riscv.vsrl.nxv2i32.nxv2i32( , , + , i32); define @intrinsic_vsrl_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { @@ -646,6 +675,7 @@ define @intrinsic_vsrl_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vsrl.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vsrl.nxv4i32.nxv4i32( , , + , i32); define @intrinsic_vsrl_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { @@ -691,6 +722,7 @@ define @intrinsic_vsrl_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vsrl.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vsrl.nxv8i32.nxv8i32( , , + , i32); define @intrinsic_vsrl_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { @@ -736,6 +769,7 @@ define @intrinsic_vsrl_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vsrl.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vsrl.nxv16i32.nxv16i32( , , + , i32); define 
@intrinsic_vsrl_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { @@ -781,6 +816,7 @@ define @intrinsic_vsrl_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vsrl.nxv16i32.nxv16i32( + undef, %0, %1, i32 %2) @@ -817,6 +853,7 @@ entry: declare @llvm.riscv.vsrl.nxv1i64.nxv1i64( , , + , i32); define @intrinsic_vsrl_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { @@ -827,6 +864,7 @@ define @intrinsic_vsrl_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vsrl.nxv1i64.nxv1i64( + undef, %0, %1, i32 %2) @@ -862,6 +900,7 @@ entry: declare @llvm.riscv.vsrl.nxv2i64.nxv2i64( , , + , i32); define @intrinsic_vsrl_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { @@ -872,6 +911,7 @@ define @intrinsic_vsrl_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vsrl.nxv2i64.nxv2i64( + undef, %0, %1, i32 %2) @@ -907,6 +947,7 @@ entry: declare @llvm.riscv.vsrl.nxv4i64.nxv4i64( , , + , i32); define @intrinsic_vsrl_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { @@ -917,6 +958,7 @@ define @intrinsic_vsrl_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vsrl.nxv4i64.nxv4i64( + undef, %0, %1, i32 %2) @@ -952,6 +994,7 @@ entry: declare @llvm.riscv.vsrl.nxv8i64.nxv8i64( , , + , i32); define @intrinsic_vsrl_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { @@ -962,6 +1005,7 @@ define @intrinsic_vsrl_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vsrl.nxv8i64.nxv8i64( + undef, %0, %1, i32 %2) @@ -997,6 +1041,7 @@ entry: declare @llvm.riscv.vsrl.nxv1i8( , + , i32, i32); @@ -1008,6 +1053,7 @@ define @intrinsic_vsrl_vx_nxv1i8_nxv1i8( %0, ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsrl.nxv1i8( + undef, %0, i32 %1, i32 %2) @@ -1042,6 +1088,7 @@ entry: declare @llvm.riscv.vsrl.nxv2i8( , + , i32, i32); @@ -1053,6 +1100,7 @@ define @intrinsic_vsrl_vx_nxv2i8_nxv2i8( %0, ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsrl.nxv2i8( + undef, %0, i32 %1, i32 %2) @@ -1087,6 +1135,7 @@ entry: declare @llvm.riscv.vsrl.nxv4i8( , + , i32, i32); @@ -1098,6 +1147,7 @@ define @intrinsic_vsrl_vx_nxv4i8_nxv4i8( %0, ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsrl.nxv4i8( + undef, %0, i32 %1, i32 %2) @@ -1132,6 +1182,7 @@ entry: declare @llvm.riscv.vsrl.nxv8i8( , + , i32, i32); @@ -1143,6 +1194,7 @@ define @intrinsic_vsrl_vx_nxv8i8_nxv8i8( %0, ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsrl.nxv8i8( + undef, %0, i32 %1, i32 %2) @@ -1177,6 +1229,7 @@ entry: declare @llvm.riscv.vsrl.nxv16i8( , + , i32, i32); @@ -1188,6 +1241,7 @@ define @intrinsic_vsrl_vx_nxv16i8_nxv16i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsrl.nxv16i8( + undef, %0, i32 %1, i32 %2) @@ -1222,6 +1276,7 @@ entry: declare @llvm.riscv.vsrl.nxv32i8( , + , i32, i32); @@ -1233,6 +1288,7 @@ define @intrinsic_vsrl_vx_nxv32i8_nxv32i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsrl.nxv32i8( + undef, %0, i32 %1, i32 %2) @@ -1267,6 +1323,7 @@ entry: declare @llvm.riscv.vsrl.nxv64i8( , + , i32, i32); @@ -1278,6 +1335,7 @@ define @intrinsic_vsrl_vx_nxv64i8_nxv64i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsrl.nxv64i8( + undef, %0, i32 %1, i32 %2) @@ -1312,6 +1370,7 @@ entry: declare @llvm.riscv.vsrl.nxv1i16( , + , i32, i32); @@ -1323,6 +1382,7 @@ define @intrinsic_vsrl_vx_nxv1i16_nxv1i16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsrl.nxv1i16( + undef, %0, i32 %1, i32 %2) @@ -1357,6 +1417,7 @@ entry: declare @llvm.riscv.vsrl.nxv2i16( , + , i32, i32); @@ -1368,6 +1429,7 @@ define @intrinsic_vsrl_vx_nxv2i16_nxv2i16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsrl.nxv2i16( + undef, %0, i32 %1, i32 %2) @@ -1402,6 +1464,7 @@ entry: declare @llvm.riscv.vsrl.nxv4i16( , + 
, i32, i32); @@ -1413,6 +1476,7 @@ define @intrinsic_vsrl_vx_nxv4i16_nxv4i16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsrl.nxv4i16( + undef, %0, i32 %1, i32 %2) @@ -1447,6 +1511,7 @@ entry: declare @llvm.riscv.vsrl.nxv8i16( , + , i32, i32); @@ -1458,6 +1523,7 @@ define @intrinsic_vsrl_vx_nxv8i16_nxv8i16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsrl.nxv8i16( + undef, %0, i32 %1, i32 %2) @@ -1492,6 +1558,7 @@ entry: declare @llvm.riscv.vsrl.nxv16i16( , + , i32, i32); @@ -1503,6 +1570,7 @@ define @intrinsic_vsrl_vx_nxv16i16_nxv16i16( @llvm.riscv.vsrl.nxv16i16( + undef, %0, i32 %1, i32 %2) @@ -1537,6 +1605,7 @@ entry: declare @llvm.riscv.vsrl.nxv32i16( , + , i32, i32); @@ -1548,6 +1617,7 @@ define @intrinsic_vsrl_vx_nxv32i16_nxv32i16( @llvm.riscv.vsrl.nxv32i16( + undef, %0, i32 %1, i32 %2) @@ -1582,6 +1652,7 @@ entry: declare @llvm.riscv.vsrl.nxv1i32( , + , i32, i32); @@ -1593,6 +1664,7 @@ define @intrinsic_vsrl_vx_nxv1i32_nxv1i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsrl.nxv1i32( + undef, %0, i32 %1, i32 %2) @@ -1627,6 +1699,7 @@ entry: declare @llvm.riscv.vsrl.nxv2i32( , + , i32, i32); @@ -1638,6 +1711,7 @@ define @intrinsic_vsrl_vx_nxv2i32_nxv2i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsrl.nxv2i32( + undef, %0, i32 %1, i32 %2) @@ -1672,6 +1746,7 @@ entry: declare @llvm.riscv.vsrl.nxv4i32( , + , i32, i32); @@ -1683,6 +1758,7 @@ define @intrinsic_vsrl_vx_nxv4i32_nxv4i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsrl.nxv4i32( + undef, %0, i32 %1, i32 %2) @@ -1717,6 +1793,7 @@ entry: declare @llvm.riscv.vsrl.nxv8i32( , + , i32, i32); @@ -1728,6 +1805,7 @@ define @intrinsic_vsrl_vx_nxv8i32_nxv8i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsrl.nxv8i32( + undef, %0, i32 %1, i32 %2) @@ -1762,6 +1840,7 @@ entry: declare @llvm.riscv.vsrl.nxv16i32( , + , i32, i32); @@ -1773,6 +1852,7 @@ define @intrinsic_vsrl_vx_nxv16i32_nxv16i32( @llvm.riscv.vsrl.nxv16i32( + undef, %0, i32 %1, i32 %2) @@ -1807,6 +1887,7 @@ entry: declare @llvm.riscv.vsrl.nxv1i64( , + , i32, i32); @@ -1818,6 +1899,7 @@ define @intrinsic_vsrl_vx_nxv1i64_nxv1i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsrl.nxv1i64( + undef, %0, i32 %1, i32 %2) @@ -1852,6 +1934,7 @@ entry: declare @llvm.riscv.vsrl.nxv2i64( , + , i32, i32); @@ -1863,6 +1946,7 @@ define @intrinsic_vsrl_vx_nxv2i64_nxv2i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsrl.nxv2i64( + undef, %0, i32 %1, i32 %2) @@ -1897,6 +1981,7 @@ entry: declare @llvm.riscv.vsrl.nxv4i64( , + , i32, i32); @@ -1908,6 +1993,7 @@ define @intrinsic_vsrl_vx_nxv4i64_nxv4i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsrl.nxv4i64( + undef, %0, i32 %1, i32 %2) @@ -1942,6 +2028,7 @@ entry: declare @llvm.riscv.vsrl.nxv8i64( , + , i32, i32); @@ -1953,6 +2040,7 @@ define @intrinsic_vsrl_vx_nxv8i64_nxv8i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsrl.nxv8i64( + undef, %0, i32 %1, i32 %2) @@ -1993,6 +2081,7 @@ define @intrinsic_vsrl_vi_nxv1i8_nxv1i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsrl.nxv1i8( + undef, %0, i32 9, i32 %1) @@ -2025,6 +2114,7 @@ define @intrinsic_vsrl_vi_nxv2i8_nxv2i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsrl.nxv2i8( + undef, %0, i32 9, i32 %1) @@ -2057,6 +2147,7 @@ define @intrinsic_vsrl_vi_nxv4i8_nxv4i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsrl.nxv4i8( + undef, %0, i32 9, i32 %1) @@ -2089,6 +2180,7 @@ define @intrinsic_vsrl_vi_nxv8i8_nxv8i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsrl.nxv8i8( + undef, %0, i32 9, i32 %1) @@ -2121,6 +2213,7 @@ define 
@intrinsic_vsrl_vi_nxv16i8_nxv16i8_i8( @llvm.riscv.vsrl.nxv16i8( + undef, %0, i32 9, i32 %1) @@ -2153,6 +2246,7 @@ define @intrinsic_vsrl_vi_nxv32i8_nxv32i8_i8( @llvm.riscv.vsrl.nxv32i8( + undef, %0, i32 9, i32 %1) @@ -2185,6 +2279,7 @@ define @intrinsic_vsrl_vi_nxv64i8_nxv64i8_i8( @llvm.riscv.vsrl.nxv64i8( + undef, %0, i32 9, i32 %1) @@ -2217,6 +2312,7 @@ define @intrinsic_vsrl_vi_nxv1i16_nxv1i16_i16( @llvm.riscv.vsrl.nxv1i16( + undef, %0, i32 9, i32 %1) @@ -2249,6 +2345,7 @@ define @intrinsic_vsrl_vi_nxv2i16_nxv2i16_i16( @llvm.riscv.vsrl.nxv2i16( + undef, %0, i32 9, i32 %1) @@ -2281,6 +2378,7 @@ define @intrinsic_vsrl_vi_nxv4i16_nxv4i16_i16( @llvm.riscv.vsrl.nxv4i16( + undef, %0, i32 9, i32 %1) @@ -2313,6 +2411,7 @@ define @intrinsic_vsrl_vi_nxv8i16_nxv8i16_i16( @llvm.riscv.vsrl.nxv8i16( + undef, %0, i32 9, i32 %1) @@ -2345,6 +2444,7 @@ define @intrinsic_vsrl_vi_nxv16i16_nxv16i16_i16( @llvm.riscv.vsrl.nxv16i16( + undef, %0, i32 9, i32 %1) @@ -2377,6 +2477,7 @@ define @intrinsic_vsrl_vi_nxv32i16_nxv32i16_i16( @llvm.riscv.vsrl.nxv32i16( + undef, %0, i32 9, i32 %1) @@ -2409,6 +2510,7 @@ define @intrinsic_vsrl_vi_nxv1i32_nxv1i32_i32( @llvm.riscv.vsrl.nxv1i32( + undef, %0, i32 9, i32 %1) @@ -2441,6 +2543,7 @@ define @intrinsic_vsrl_vi_nxv2i32_nxv2i32_i32( @llvm.riscv.vsrl.nxv2i32( + undef, %0, i32 9, i32 %1) @@ -2473,6 +2576,7 @@ define @intrinsic_vsrl_vi_nxv4i32_nxv4i32_i32( @llvm.riscv.vsrl.nxv4i32( + undef, %0, i32 9, i32 %1) @@ -2505,6 +2609,7 @@ define @intrinsic_vsrl_vi_nxv8i32_nxv8i32_i32( @llvm.riscv.vsrl.nxv8i32( + undef, %0, i32 9, i32 %1) @@ -2537,6 +2642,7 @@ define @intrinsic_vsrl_vi_nxv16i32_nxv16i32_i32( @llvm.riscv.vsrl.nxv16i32( + undef, %0, i32 9, i32 %1) @@ -2569,6 +2675,7 @@ define @intrinsic_vsrl_vi_nxv1i64_nxv1i64_i64( @llvm.riscv.vsrl.nxv1i64( + undef, %0, i32 9, i32 %1) @@ -2601,6 +2708,7 @@ define @intrinsic_vsrl_vi_nxv2i64_nxv2i64_i64( @llvm.riscv.vsrl.nxv2i64( + undef, %0, i32 9, i32 %1) @@ -2633,6 +2741,7 @@ define @intrinsic_vsrl_vi_nxv4i64_nxv4i64_i64( @llvm.riscv.vsrl.nxv4i64( + undef, %0, i32 9, i32 %1) @@ -2665,6 +2774,7 @@ define @intrinsic_vsrl_vi_nxv8i64_nxv8i64_i64( @llvm.riscv.vsrl.nxv8i64( + undef, %0, i32 9, i32 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll index 054d81a..270f5ca 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll @@ -4,6 +4,7 @@ declare @llvm.riscv.vsrl.nxv1i8.nxv1i8( , , + , i64); define @intrinsic_vsrl_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { @@ -14,6 +15,7 @@ define @intrinsic_vsrl_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vsrl.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -49,6 +51,7 @@ entry: declare @llvm.riscv.vsrl.nxv2i8.nxv2i8( , , + , i64); define @intrinsic_vsrl_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { @@ -59,6 +62,7 @@ define @intrinsic_vsrl_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vsrl.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -94,6 +98,7 @@ entry: declare @llvm.riscv.vsrl.nxv4i8.nxv4i8( , , + , i64); define @intrinsic_vsrl_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { @@ -104,6 +109,7 @@ define @intrinsic_vsrl_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vsrl.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -139,6 +145,7 @@ entry: declare @llvm.riscv.vsrl.nxv8i8.nxv8i8( , , + , i64); define @intrinsic_vsrl_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { @@ -149,6 +156,7 @@ define @intrinsic_vsrl_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vsrl.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -184,6 +192,7 @@ entry: declare 
@llvm.riscv.vsrl.nxv16i8.nxv16i8( , , + , i64); define @intrinsic_vsrl_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { @@ -194,6 +203,7 @@ define @intrinsic_vsrl_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vsrl.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -229,6 +239,7 @@ entry: declare @llvm.riscv.vsrl.nxv32i8.nxv32i8( , , + , i64); define @intrinsic_vsrl_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { @@ -239,6 +250,7 @@ define @intrinsic_vsrl_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vsrl.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -274,6 +286,7 @@ entry: declare @llvm.riscv.vsrl.nxv64i8.nxv64i8( , , + , i64); define @intrinsic_vsrl_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { @@ -284,6 +297,7 @@ define @intrinsic_vsrl_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vsrl.nxv64i8.nxv64i8( + undef, %0, %1, i64 %2) @@ -320,6 +334,7 @@ entry: declare @llvm.riscv.vsrl.nxv1i16.nxv1i16( , , + , i64); define @intrinsic_vsrl_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { @@ -330,6 +345,7 @@ define @intrinsic_vsrl_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vsrl.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -365,6 +381,7 @@ entry: declare @llvm.riscv.vsrl.nxv2i16.nxv2i16( , , + , i64); define @intrinsic_vsrl_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { @@ -375,6 +392,7 @@ define @intrinsic_vsrl_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vsrl.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -410,6 +428,7 @@ entry: declare @llvm.riscv.vsrl.nxv4i16.nxv4i16( , , + , i64); define @intrinsic_vsrl_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { @@ -420,6 +439,7 @@ define @intrinsic_vsrl_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vsrl.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -455,6 +475,7 @@ entry: declare @llvm.riscv.vsrl.nxv8i16.nxv8i16( , , + , i64); define @intrinsic_vsrl_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { @@ -465,6 +486,7 @@ define @intrinsic_vsrl_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vsrl.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vsrl.nxv16i16.nxv16i16( , , + , i64); define @intrinsic_vsrl_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { @@ -510,6 +533,7 @@ define @intrinsic_vsrl_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vsrl.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vsrl.nxv32i16.nxv32i16( , , + , i64); define @intrinsic_vsrl_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { @@ -555,6 +580,7 @@ define @intrinsic_vsrl_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vsrl.nxv32i16.nxv32i16( + undef, %0, %1, i64 %2) @@ -591,6 +617,7 @@ entry: declare @llvm.riscv.vsrl.nxv1i32.nxv1i32( , , + , i64); define @intrinsic_vsrl_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { @@ -601,6 +628,7 @@ define @intrinsic_vsrl_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vsrl.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -636,6 +664,7 @@ entry: declare @llvm.riscv.vsrl.nxv2i32.nxv2i32( , , + , i64); define @intrinsic_vsrl_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { @@ -646,6 +675,7 @@ define @intrinsic_vsrl_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vsrl.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vsrl.nxv4i32.nxv4i32( , , + , i64); define @intrinsic_vsrl_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { @@ -691,6 +722,7 @@ define @intrinsic_vsrl_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vsrl.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vsrl.nxv8i32.nxv8i32( , , + , 
i64); define @intrinsic_vsrl_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { @@ -736,6 +769,7 @@ define @intrinsic_vsrl_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vsrl.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vsrl.nxv16i32.nxv16i32( , , + , i64); define @intrinsic_vsrl_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { @@ -781,6 +816,7 @@ define @intrinsic_vsrl_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vsrl.nxv16i32.nxv16i32( + undef, %0, %1, i64 %2) @@ -817,6 +853,7 @@ entry: declare @llvm.riscv.vsrl.nxv1i64.nxv1i64( , , + , i64); define @intrinsic_vsrl_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { @@ -827,6 +864,7 @@ define @intrinsic_vsrl_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vsrl.nxv1i64.nxv1i64( + undef, %0, %1, i64 %2) @@ -862,6 +900,7 @@ entry: declare @llvm.riscv.vsrl.nxv2i64.nxv2i64( , , + , i64); define @intrinsic_vsrl_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { @@ -872,6 +911,7 @@ define @intrinsic_vsrl_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vsrl.nxv2i64.nxv2i64( + undef, %0, %1, i64 %2) @@ -907,6 +947,7 @@ entry: declare @llvm.riscv.vsrl.nxv4i64.nxv4i64( , , + , i64); define @intrinsic_vsrl_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { @@ -917,6 +958,7 @@ define @intrinsic_vsrl_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vsrl.nxv4i64.nxv4i64( + undef, %0, %1, i64 %2) @@ -952,6 +994,7 @@ entry: declare @llvm.riscv.vsrl.nxv8i64.nxv8i64( , , + , i64); define @intrinsic_vsrl_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { @@ -962,6 +1005,7 @@ define @intrinsic_vsrl_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vsrl.nxv8i64.nxv8i64( + undef, %0, %1, i64 %2) @@ -997,6 +1041,7 @@ entry: declare @llvm.riscv.vsrl.nxv1i8( , + , i64, i64); @@ -1008,6 +1053,7 @@ define @intrinsic_vsrl_vx_nxv1i8_nxv1i8( %0, ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsrl.nxv1i8( + undef, %0, i64 %1, i64 %2) @@ -1042,6 +1088,7 @@ entry: declare @llvm.riscv.vsrl.nxv2i8( , + , i64, i64); @@ -1053,6 +1100,7 @@ define @intrinsic_vsrl_vx_nxv2i8_nxv2i8( %0, ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsrl.nxv2i8( + undef, %0, i64 %1, i64 %2) @@ -1087,6 +1135,7 @@ entry: declare @llvm.riscv.vsrl.nxv4i8( , + , i64, i64); @@ -1098,6 +1147,7 @@ define @intrinsic_vsrl_vx_nxv4i8_nxv4i8( %0, ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsrl.nxv4i8( + undef, %0, i64 %1, i64 %2) @@ -1132,6 +1182,7 @@ entry: declare @llvm.riscv.vsrl.nxv8i8( , + , i64, i64); @@ -1143,6 +1194,7 @@ define @intrinsic_vsrl_vx_nxv8i8_nxv8i8( %0, ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsrl.nxv8i8( + undef, %0, i64 %1, i64 %2) @@ -1177,6 +1229,7 @@ entry: declare @llvm.riscv.vsrl.nxv16i8( , + , i64, i64); @@ -1188,6 +1241,7 @@ define @intrinsic_vsrl_vx_nxv16i8_nxv16i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsrl.nxv16i8( + undef, %0, i64 %1, i64 %2) @@ -1222,6 +1276,7 @@ entry: declare @llvm.riscv.vsrl.nxv32i8( , + , i64, i64); @@ -1233,6 +1288,7 @@ define @intrinsic_vsrl_vx_nxv32i8_nxv32i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsrl.nxv32i8( + undef, %0, i64 %1, i64 %2) @@ -1267,6 +1323,7 @@ entry: declare @llvm.riscv.vsrl.nxv64i8( , + , i64, i64); @@ -1278,6 +1335,7 @@ define @intrinsic_vsrl_vx_nxv64i8_nxv64i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsrl.nxv64i8( + undef, %0, i64 %1, i64 %2) @@ -1312,6 +1370,7 @@ entry: declare @llvm.riscv.vsrl.nxv1i16( , + , i64, i64); @@ -1323,6 +1382,7 @@ define @intrinsic_vsrl_vx_nxv1i16_nxv1i16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsrl.nxv1i16( + undef, %0, i64 %1, i64 
%2) @@ -1357,6 +1417,7 @@ entry: declare @llvm.riscv.vsrl.nxv2i16( , + , i64, i64); @@ -1368,6 +1429,7 @@ define @intrinsic_vsrl_vx_nxv2i16_nxv2i16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsrl.nxv2i16( + undef, %0, i64 %1, i64 %2) @@ -1402,6 +1464,7 @@ entry: declare @llvm.riscv.vsrl.nxv4i16( , + , i64, i64); @@ -1413,6 +1476,7 @@ define @intrinsic_vsrl_vx_nxv4i16_nxv4i16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsrl.nxv4i16( + undef, %0, i64 %1, i64 %2) @@ -1447,6 +1511,7 @@ entry: declare @llvm.riscv.vsrl.nxv8i16( , + , i64, i64); @@ -1458,6 +1523,7 @@ define @intrinsic_vsrl_vx_nxv8i16_nxv8i16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsrl.nxv8i16( + undef, %0, i64 %1, i64 %2) @@ -1492,6 +1558,7 @@ entry: declare @llvm.riscv.vsrl.nxv16i16( , + , i64, i64); @@ -1503,6 +1570,7 @@ define @intrinsic_vsrl_vx_nxv16i16_nxv16i16( @llvm.riscv.vsrl.nxv16i16( + undef, %0, i64 %1, i64 %2) @@ -1537,6 +1605,7 @@ entry: declare @llvm.riscv.vsrl.nxv32i16( , + , i64, i64); @@ -1548,6 +1617,7 @@ define @intrinsic_vsrl_vx_nxv32i16_nxv32i16( @llvm.riscv.vsrl.nxv32i16( + undef, %0, i64 %1, i64 %2) @@ -1582,6 +1652,7 @@ entry: declare @llvm.riscv.vsrl.nxv1i32( , + , i64, i64); @@ -1593,6 +1664,7 @@ define @intrinsic_vsrl_vx_nxv1i32_nxv1i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsrl.nxv1i32( + undef, %0, i64 %1, i64 %2) @@ -1627,6 +1699,7 @@ entry: declare @llvm.riscv.vsrl.nxv2i32( , + , i64, i64); @@ -1638,6 +1711,7 @@ define @intrinsic_vsrl_vx_nxv2i32_nxv2i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsrl.nxv2i32( + undef, %0, i64 %1, i64 %2) @@ -1672,6 +1746,7 @@ entry: declare @llvm.riscv.vsrl.nxv4i32( , + , i64, i64); @@ -1683,6 +1758,7 @@ define @intrinsic_vsrl_vx_nxv4i32_nxv4i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsrl.nxv4i32( + undef, %0, i64 %1, i64 %2) @@ -1717,6 +1793,7 @@ entry: declare @llvm.riscv.vsrl.nxv8i32( , + , i64, i64); @@ -1728,6 +1805,7 @@ define @intrinsic_vsrl_vx_nxv8i32_nxv8i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsrl.nxv8i32( + undef, %0, i64 %1, i64 %2) @@ -1762,6 +1840,7 @@ entry: declare @llvm.riscv.vsrl.nxv16i32( , + , i64, i64); @@ -1773,6 +1852,7 @@ define @intrinsic_vsrl_vx_nxv16i32_nxv16i32( @llvm.riscv.vsrl.nxv16i32( + undef, %0, i64 %1, i64 %2) @@ -1807,6 +1887,7 @@ entry: declare @llvm.riscv.vsrl.nxv1i64( , + , i64, i64); @@ -1818,6 +1899,7 @@ define @intrinsic_vsrl_vx_nxv1i64_nxv1i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsrl.nxv1i64( + undef, %0, i64 %1, i64 %2) @@ -1852,6 +1934,7 @@ entry: declare @llvm.riscv.vsrl.nxv2i64( , + , i64, i64); @@ -1863,6 +1946,7 @@ define @intrinsic_vsrl_vx_nxv2i64_nxv2i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsrl.nxv2i64( + undef, %0, i64 %1, i64 %2) @@ -1897,6 +1981,7 @@ entry: declare @llvm.riscv.vsrl.nxv4i64( , + , i64, i64); @@ -1908,6 +1993,7 @@ define @intrinsic_vsrl_vx_nxv4i64_nxv4i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsrl.nxv4i64( + undef, %0, i64 %1, i64 %2) @@ -1942,6 +2028,7 @@ entry: declare @llvm.riscv.vsrl.nxv8i64( , + , i64, i64); @@ -1953,6 +2040,7 @@ define @intrinsic_vsrl_vx_nxv8i64_nxv8i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsrl.nxv8i64( + undef, %0, i64 %1, i64 %2) @@ -1993,6 +2081,7 @@ define @intrinsic_vsrl_vi_nxv1i8_nxv1i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsrl.nxv1i8( + undef, %0, i64 9, i64 %1) @@ -2025,6 +2114,7 @@ define @intrinsic_vsrl_vi_nxv2i8_nxv2i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsrl.nxv2i8( + undef, %0, i64 9, i64 %1) @@ -2057,6 +2147,7 @@ define 
@intrinsic_vsrl_vi_nxv4i8_nxv4i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsrl.nxv4i8( + undef, %0, i64 9, i64 %1) @@ -2089,6 +2180,7 @@ define @intrinsic_vsrl_vi_nxv8i8_nxv8i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsrl.nxv8i8( + undef, %0, i64 9, i64 %1) @@ -2121,6 +2213,7 @@ define @intrinsic_vsrl_vi_nxv16i8_nxv16i8_i8( @llvm.riscv.vsrl.nxv16i8( + undef, %0, i64 9, i64 %1) @@ -2153,6 +2246,7 @@ define @intrinsic_vsrl_vi_nxv32i8_nxv32i8_i8( @llvm.riscv.vsrl.nxv32i8( + undef, %0, i64 9, i64 %1) @@ -2185,6 +2279,7 @@ define @intrinsic_vsrl_vi_nxv64i8_nxv64i8_i8( @llvm.riscv.vsrl.nxv64i8( + undef, %0, i64 9, i64 %1) @@ -2217,6 +2312,7 @@ define @intrinsic_vsrl_vi_nxv1i16_nxv1i16_i16( @llvm.riscv.vsrl.nxv1i16( + undef, %0, i64 9, i64 %1) @@ -2249,6 +2345,7 @@ define @intrinsic_vsrl_vi_nxv2i16_nxv2i16_i16( @llvm.riscv.vsrl.nxv2i16( + undef, %0, i64 9, i64 %1) @@ -2281,6 +2378,7 @@ define @intrinsic_vsrl_vi_nxv4i16_nxv4i16_i16( @llvm.riscv.vsrl.nxv4i16( + undef, %0, i64 9, i64 %1) @@ -2313,6 +2411,7 @@ define @intrinsic_vsrl_vi_nxv8i16_nxv8i16_i16( @llvm.riscv.vsrl.nxv8i16( + undef, %0, i64 9, i64 %1) @@ -2345,6 +2444,7 @@ define @intrinsic_vsrl_vi_nxv16i16_nxv16i16_i16( @llvm.riscv.vsrl.nxv16i16( + undef, %0, i64 9, i64 %1) @@ -2377,6 +2477,7 @@ define @intrinsic_vsrl_vi_nxv32i16_nxv32i16_i16( @llvm.riscv.vsrl.nxv32i16( + undef, %0, i64 9, i64 %1) @@ -2409,6 +2510,7 @@ define @intrinsic_vsrl_vi_nxv1i32_nxv1i32_i32( @llvm.riscv.vsrl.nxv1i32( + undef, %0, i64 9, i64 %1) @@ -2441,6 +2543,7 @@ define @intrinsic_vsrl_vi_nxv2i32_nxv2i32_i32( @llvm.riscv.vsrl.nxv2i32( + undef, %0, i64 9, i64 %1) @@ -2473,6 +2576,7 @@ define @intrinsic_vsrl_vi_nxv4i32_nxv4i32_i32( @llvm.riscv.vsrl.nxv4i32( + undef, %0, i64 9, i64 %1) @@ -2505,6 +2609,7 @@ define @intrinsic_vsrl_vi_nxv8i32_nxv8i32_i32( @llvm.riscv.vsrl.nxv8i32( + undef, %0, i64 9, i64 %1) @@ -2537,6 +2642,7 @@ define @intrinsic_vsrl_vi_nxv16i32_nxv16i32_i32( @llvm.riscv.vsrl.nxv16i32( + undef, %0, i64 9, i64 %1) @@ -2569,6 +2675,7 @@ define @intrinsic_vsrl_vi_nxv1i64_nxv1i64_i64( @llvm.riscv.vsrl.nxv1i64( + undef, %0, i64 9, i64 %1) @@ -2601,6 +2708,7 @@ define @intrinsic_vsrl_vi_nxv2i64_nxv2i64_i64( @llvm.riscv.vsrl.nxv2i64( + undef, %0, i64 9, i64 %1) @@ -2633,6 +2741,7 @@ define @intrinsic_vsrl_vi_nxv4i64_nxv4i64_i64( @llvm.riscv.vsrl.nxv4i64( + undef, %0, i64 9, i64 %1) @@ -2665,6 +2774,7 @@ define @intrinsic_vsrl_vi_nxv8i64_nxv8i64_i64( @llvm.riscv.vsrl.nxv8i64( + undef, %0, i64 9, i64 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll index 9cb0c48..622d1e9e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll @@ -4,6 +4,7 @@ declare @llvm.riscv.vssra.nxv1i8.nxv1i8( , , + , i32); define @intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { @@ -14,6 +15,7 @@ define @intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vssra.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -49,6 +51,7 @@ entry: declare @llvm.riscv.vssra.nxv2i8.nxv2i8( , , + , i32); define @intrinsic_vssra_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { @@ -59,6 +62,7 @@ define @intrinsic_vssra_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vssra.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -94,6 +98,7 @@ entry: declare @llvm.riscv.vssra.nxv4i8.nxv4i8( , , + , i32); define @intrinsic_vssra_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { @@ -104,6 +109,7 @@ define @intrinsic_vssra_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vssra.nxv4i8.nxv4i8( + undef, %0, 
%1, i32 %2) @@ -139,6 +145,7 @@ entry: declare @llvm.riscv.vssra.nxv8i8.nxv8i8( , , + , i32); define @intrinsic_vssra_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { @@ -149,6 +156,7 @@ define @intrinsic_vssra_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vssra.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -184,6 +192,7 @@ entry: declare @llvm.riscv.vssra.nxv16i8.nxv16i8( , , + , i32); define @intrinsic_vssra_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { @@ -194,6 +203,7 @@ define @intrinsic_vssra_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vssra.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -229,6 +239,7 @@ entry: declare @llvm.riscv.vssra.nxv32i8.nxv32i8( , , + , i32); define @intrinsic_vssra_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { @@ -239,6 +250,7 @@ define @intrinsic_vssra_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vssra.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -274,6 +286,7 @@ entry: declare @llvm.riscv.vssra.nxv64i8.nxv64i8( , , + , i32); define @intrinsic_vssra_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { @@ -284,6 +297,7 @@ define @intrinsic_vssra_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vssra.nxv64i8.nxv64i8( + undef, %0, %1, i32 %2) @@ -320,6 +334,7 @@ entry: declare @llvm.riscv.vssra.nxv1i16.nxv1i16( , , + , i32); define @intrinsic_vssra_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { @@ -330,6 +345,7 @@ define @intrinsic_vssra_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vssra.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -365,6 +381,7 @@ entry: declare @llvm.riscv.vssra.nxv2i16.nxv2i16( , , + , i32); define @intrinsic_vssra_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { @@ -375,6 +392,7 @@ define @intrinsic_vssra_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vssra.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -410,6 +428,7 @@ entry: declare @llvm.riscv.vssra.nxv4i16.nxv4i16( , , + , i32); define @intrinsic_vssra_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { @@ -420,6 +439,7 @@ define @intrinsic_vssra_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vssra.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -455,6 +475,7 @@ entry: declare @llvm.riscv.vssra.nxv8i16.nxv8i16( , , + , i32); define @intrinsic_vssra_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { @@ -465,6 +486,7 @@ define @intrinsic_vssra_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vssra.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vssra.nxv16i16.nxv16i16( , , + , i32); define @intrinsic_vssra_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { @@ -510,6 +533,7 @@ define @intrinsic_vssra_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vssra.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vssra.nxv32i16.nxv32i16( , , + , i32); define @intrinsic_vssra_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { @@ -555,6 +580,7 @@ define @intrinsic_vssra_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vssra.nxv32i16.nxv32i16( + undef, %0, %1, i32 %2) @@ -591,6 +617,7 @@ entry: declare @llvm.riscv.vssra.nxv1i32.nxv1i32( , , + , i32); define @intrinsic_vssra_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { @@ -601,6 +628,7 @@ define @intrinsic_vssra_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vssra.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -636,6 +664,7 @@ entry: declare @llvm.riscv.vssra.nxv2i32.nxv2i32( , , + , i32); define @intrinsic_vssra_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { @@ -646,6 +675,7 @@ define @intrinsic_vssra_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vssra.nxv2i32.nxv2i32( + undef, %0, %1, 
i32 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vssra.nxv4i32.nxv4i32( , , + , i32); define @intrinsic_vssra_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { @@ -691,6 +722,7 @@ define @intrinsic_vssra_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vssra.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vssra.nxv8i32.nxv8i32( , , + , i32); define @intrinsic_vssra_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { @@ -736,6 +769,7 @@ define @intrinsic_vssra_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vssra.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vssra.nxv16i32.nxv16i32( , , + , i32); define @intrinsic_vssra_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { @@ -781,6 +816,7 @@ define @intrinsic_vssra_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vssra.nxv16i32.nxv16i32( + undef, %0, %1, i32 %2) @@ -816,6 +852,7 @@ entry: declare @llvm.riscv.vssra.nxv1i8( , + , i32, i32); @@ -827,6 +864,7 @@ define @intrinsic_vssra_vx_nxv1i8_nxv1i8( %0, ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv1i8( + undef, %0, i32 %1, i32 %2) @@ -861,6 +899,7 @@ entry: declare @llvm.riscv.vssra.nxv2i8( , + , i32, i32); @@ -872,6 +911,7 @@ define @intrinsic_vssra_vx_nxv2i8_nxv2i8( %0, ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv2i8( + undef, %0, i32 %1, i32 %2) @@ -906,6 +946,7 @@ entry: declare @llvm.riscv.vssra.nxv4i8( , + , i32, i32); @@ -917,6 +958,7 @@ define @intrinsic_vssra_vx_nxv4i8_nxv4i8( %0, ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv4i8( + undef, %0, i32 %1, i32 %2) @@ -951,6 +993,7 @@ entry: declare @llvm.riscv.vssra.nxv8i8( , + , i32, i32); @@ -962,6 +1005,7 @@ define @intrinsic_vssra_vx_nxv8i8_nxv8i8( %0, ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv8i8( + undef, %0, i32 %1, i32 %2) @@ -996,6 +1040,7 @@ entry: declare @llvm.riscv.vssra.nxv16i8( , + , i32, i32); @@ -1007,6 +1052,7 @@ define @intrinsic_vssra_vx_nxv16i8_nxv16i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv16i8( + undef, %0, i32 %1, i32 %2) @@ -1041,6 +1087,7 @@ entry: declare @llvm.riscv.vssra.nxv32i8( , + , i32, i32); @@ -1052,6 +1099,7 @@ define @intrinsic_vssra_vx_nxv32i8_nxv32i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv32i8( + undef, %0, i32 %1, i32 %2) @@ -1086,6 +1134,7 @@ entry: declare @llvm.riscv.vssra.nxv64i8( , + , i32, i32); @@ -1097,6 +1146,7 @@ define @intrinsic_vssra_vx_nxv64i8_nxv64i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv64i8( + undef, %0, i32 %1, i32 %2) @@ -1131,6 +1181,7 @@ entry: declare @llvm.riscv.vssra.nxv1i16( , + , i32, i32); @@ -1142,6 +1193,7 @@ define @intrinsic_vssra_vx_nxv1i16_nxv1i16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv1i16( + undef, %0, i32 %1, i32 %2) @@ -1176,6 +1228,7 @@ entry: declare @llvm.riscv.vssra.nxv2i16( , + , i32, i32); @@ -1187,6 +1240,7 @@ define @intrinsic_vssra_vx_nxv2i16_nxv2i16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv2i16( + undef, %0, i32 %1, i32 %2) @@ -1221,6 +1275,7 @@ entry: declare @llvm.riscv.vssra.nxv4i16( , + , i32, i32); @@ -1232,6 +1287,7 @@ define @intrinsic_vssra_vx_nxv4i16_nxv4i16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv4i16( + undef, %0, i32 %1, i32 %2) @@ -1266,6 +1322,7 @@ entry: declare @llvm.riscv.vssra.nxv8i16( , + , i32, i32); @@ -1277,6 +1334,7 @@ define @intrinsic_vssra_vx_nxv8i16_nxv8i16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv8i16( + undef, %0, i32 %1, i32 %2) @@ -1311,6 +1369,7 @@ entry: declare 
@llvm.riscv.vssra.nxv16i16( , + , i32, i32); @@ -1322,6 +1381,7 @@ define @intrinsic_vssra_vx_nxv16i16_nxv16i16( @llvm.riscv.vssra.nxv16i16( + undef, %0, i32 %1, i32 %2) @@ -1356,6 +1416,7 @@ entry: declare @llvm.riscv.vssra.nxv32i16( , + , i32, i32); @@ -1367,6 +1428,7 @@ define @intrinsic_vssra_vx_nxv32i16_nxv32i16( @llvm.riscv.vssra.nxv32i16( + undef, %0, i32 %1, i32 %2) @@ -1401,6 +1463,7 @@ entry: declare @llvm.riscv.vssra.nxv1i32( , + , i32, i32); @@ -1412,6 +1475,7 @@ define @intrinsic_vssra_vx_nxv1i32_nxv1i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv1i32( + undef, %0, i32 %1, i32 %2) @@ -1446,6 +1510,7 @@ entry: declare @llvm.riscv.vssra.nxv2i32( , + , i32, i32); @@ -1457,6 +1522,7 @@ define @intrinsic_vssra_vx_nxv2i32_nxv2i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv2i32( + undef, %0, i32 %1, i32 %2) @@ -1491,6 +1557,7 @@ entry: declare @llvm.riscv.vssra.nxv4i32( , + , i32, i32); @@ -1502,6 +1569,7 @@ define @intrinsic_vssra_vx_nxv4i32_nxv4i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv4i32( + undef, %0, i32 %1, i32 %2) @@ -1536,6 +1604,7 @@ entry: declare @llvm.riscv.vssra.nxv8i32( , + , i32, i32); @@ -1547,6 +1616,7 @@ define @intrinsic_vssra_vx_nxv8i32_nxv8i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv8i32( + undef, %0, i32 %1, i32 %2) @@ -1581,6 +1651,7 @@ entry: declare @llvm.riscv.vssra.nxv16i32( , + , i32, i32); @@ -1592,6 +1663,7 @@ define @intrinsic_vssra_vx_nxv16i32_nxv16i32( @llvm.riscv.vssra.nxv16i32( + undef, %0, i32 %1, i32 %2) @@ -1626,6 +1698,7 @@ entry: declare @llvm.riscv.vssra.nxv1i64( , + , i32, i32); @@ -1637,6 +1710,7 @@ define @intrinsic_vssra_vx_nxv1i64_nxv1i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv1i64( + undef, %0, i32 %1, i32 %2) @@ -1671,6 +1745,7 @@ entry: declare @llvm.riscv.vssra.nxv2i64( , + , i32, i32); @@ -1682,6 +1757,7 @@ define @intrinsic_vssra_vx_nxv2i64_nxv2i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv2i64( + undef, %0, i32 %1, i32 %2) @@ -1716,6 +1792,7 @@ entry: declare @llvm.riscv.vssra.nxv4i64( , + , i32, i32); @@ -1727,6 +1804,7 @@ define @intrinsic_vssra_vx_nxv4i64_nxv4i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv4i64( + undef, %0, i32 %1, i32 %2) @@ -1761,6 +1839,7 @@ entry: declare @llvm.riscv.vssra.nxv8i64( , + , i32, i32); @@ -1772,6 +1851,7 @@ define @intrinsic_vssra_vx_nxv8i64_nxv8i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv8i64( + undef, %0, i32 %1, i32 %2) @@ -1812,6 +1892,7 @@ define @intrinsic_vssra_vi_nxv1i8_nxv1i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv1i8( + undef, %0, i32 9, i32 %1) @@ -1844,6 +1925,7 @@ define @intrinsic_vssra_vi_nxv2i8_nxv2i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv2i8( + undef, %0, i32 9, i32 %1) @@ -1876,6 +1958,7 @@ define @intrinsic_vssra_vi_nxv4i8_nxv4i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv4i8( + undef, %0, i32 9, i32 %1) @@ -1908,6 +1991,7 @@ define @intrinsic_vssra_vi_nxv8i8_nxv8i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv8i8( + undef, %0, i32 9, i32 %1) @@ -1940,6 +2024,7 @@ define @intrinsic_vssra_vi_nxv16i8_nxv16i8_i8( @llvm.riscv.vssra.nxv16i8( + undef, %0, i32 9, i32 %1) @@ -1972,6 +2057,7 @@ define @intrinsic_vssra_vi_nxv32i8_nxv32i8_i8( @llvm.riscv.vssra.nxv32i8( + undef, %0, i32 9, i32 %1) @@ -2004,6 +2090,7 @@ define @intrinsic_vssra_vi_nxv64i8_nxv64i8_i8( @llvm.riscv.vssra.nxv64i8( + undef, %0, i32 9, i32 %1) @@ -2036,6 +2123,7 @@ define @intrinsic_vssra_vi_nxv1i16_nxv1i16_i16( 
@llvm.riscv.vssra.nxv1i16( + undef, %0, i32 9, i32 %1) @@ -2068,6 +2156,7 @@ define @intrinsic_vssra_vi_nxv2i16_nxv2i16_i16( @llvm.riscv.vssra.nxv2i16( + undef, %0, i32 9, i32 %1) @@ -2100,6 +2189,7 @@ define @intrinsic_vssra_vi_nxv4i16_nxv4i16_i16( @llvm.riscv.vssra.nxv4i16( + undef, %0, i32 9, i32 %1) @@ -2132,6 +2222,7 @@ define @intrinsic_vssra_vi_nxv8i16_nxv8i16_i16( @llvm.riscv.vssra.nxv8i16( + undef, %0, i32 9, i32 %1) @@ -2164,6 +2255,7 @@ define @intrinsic_vssra_vi_nxv16i16_nxv16i16_i16( @llvm.riscv.vssra.nxv16i16( + undef, %0, i32 9, i32 %1) @@ -2196,6 +2288,7 @@ define @intrinsic_vssra_vi_nxv32i16_nxv32i16_i16( @llvm.riscv.vssra.nxv32i16( + undef, %0, i32 9, i32 %1) @@ -2228,6 +2321,7 @@ define @intrinsic_vssra_vi_nxv1i32_nxv1i32_i32( @llvm.riscv.vssra.nxv1i32( + undef, %0, i32 9, i32 %1) @@ -2260,6 +2354,7 @@ define @intrinsic_vssra_vi_nxv2i32_nxv2i32_i32( @llvm.riscv.vssra.nxv2i32( + undef, %0, i32 9, i32 %1) @@ -2292,6 +2387,7 @@ define @intrinsic_vssra_vi_nxv4i32_nxv4i32_i32( @llvm.riscv.vssra.nxv4i32( + undef, %0, i32 9, i32 %1) @@ -2324,6 +2420,7 @@ define @intrinsic_vssra_vi_nxv8i32_nxv8i32_i32( @llvm.riscv.vssra.nxv8i32( + undef, %0, i32 9, i32 %1) @@ -2356,6 +2453,7 @@ define @intrinsic_vssra_vi_nxv16i32_nxv16i32_i32( @llvm.riscv.vssra.nxv16i32( + undef, %0, i32 9, i32 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll index b39bdb4..621d8a5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll @@ -4,6 +4,7 @@ declare @llvm.riscv.vssra.nxv1i8.nxv1i8( , , + , i64); define @intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { @@ -14,6 +15,7 @@ define @intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vssra.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -49,6 +51,7 @@ entry: declare @llvm.riscv.vssra.nxv2i8.nxv2i8( , , + , i64); define @intrinsic_vssra_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { @@ -59,6 +62,7 @@ define @intrinsic_vssra_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vssra.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -94,6 +98,7 @@ entry: declare @llvm.riscv.vssra.nxv4i8.nxv4i8( , , + , i64); define @intrinsic_vssra_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { @@ -104,6 +109,7 @@ define @intrinsic_vssra_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vssra.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -139,6 +145,7 @@ entry: declare @llvm.riscv.vssra.nxv8i8.nxv8i8( , , + , i64); define @intrinsic_vssra_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { @@ -149,6 +156,7 @@ define @intrinsic_vssra_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vssra.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -184,6 +192,7 @@ entry: declare @llvm.riscv.vssra.nxv16i8.nxv16i8( , , + , i64); define @intrinsic_vssra_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { @@ -194,6 +203,7 @@ define @intrinsic_vssra_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vssra.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -229,6 +239,7 @@ entry: declare @llvm.riscv.vssra.nxv32i8.nxv32i8( , , + , i64); define @intrinsic_vssra_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { @@ -239,6 +250,7 @@ define @intrinsic_vssra_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vssra.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -274,6 +286,7 @@ entry: declare @llvm.riscv.vssra.nxv64i8.nxv64i8( , , + , i64); define @intrinsic_vssra_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { @@ -284,6 +297,7 @@ define @intrinsic_vssra_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vssra.nxv64i8.nxv64i8( + undef, %0, %1, i64 %2) @@ -320,6 
+334,7 @@ entry: declare @llvm.riscv.vssra.nxv1i16.nxv1i16( , , + , i64); define @intrinsic_vssra_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { @@ -330,6 +345,7 @@ define @intrinsic_vssra_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vssra.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -365,6 +381,7 @@ entry: declare @llvm.riscv.vssra.nxv2i16.nxv2i16( , , + , i64); define @intrinsic_vssra_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { @@ -375,6 +392,7 @@ define @intrinsic_vssra_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vssra.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -410,6 +428,7 @@ entry: declare @llvm.riscv.vssra.nxv4i16.nxv4i16( , , + , i64); define @intrinsic_vssra_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { @@ -420,6 +439,7 @@ define @intrinsic_vssra_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vssra.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -455,6 +475,7 @@ entry: declare @llvm.riscv.vssra.nxv8i16.nxv8i16( , , + , i64); define @intrinsic_vssra_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { @@ -465,6 +486,7 @@ define @intrinsic_vssra_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vssra.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vssra.nxv16i16.nxv16i16( , , + , i64); define @intrinsic_vssra_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { @@ -510,6 +533,7 @@ define @intrinsic_vssra_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vssra.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vssra.nxv32i16.nxv32i16( , , + , i64); define @intrinsic_vssra_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { @@ -555,6 +580,7 @@ define @intrinsic_vssra_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vssra.nxv32i16.nxv32i16( + undef, %0, %1, i64 %2) @@ -591,6 +617,7 @@ entry: declare @llvm.riscv.vssra.nxv1i32.nxv1i32( , , + , i64); define @intrinsic_vssra_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { @@ -601,6 +628,7 @@ define @intrinsic_vssra_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vssra.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -636,6 +664,7 @@ entry: declare @llvm.riscv.vssra.nxv2i32.nxv2i32( , , + , i64); define @intrinsic_vssra_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { @@ -646,6 +675,7 @@ define @intrinsic_vssra_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vssra.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vssra.nxv4i32.nxv4i32( , , + , i64); define @intrinsic_vssra_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { @@ -691,6 +722,7 @@ define @intrinsic_vssra_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vssra.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vssra.nxv8i32.nxv8i32( , , + , i64); define @intrinsic_vssra_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { @@ -736,6 +769,7 @@ define @intrinsic_vssra_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vssra.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vssra.nxv16i32.nxv16i32( , , + , i64); define @intrinsic_vssra_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { @@ -781,6 +816,7 @@ define @intrinsic_vssra_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vssra.nxv16i32.nxv16i32( + undef, %0, %1, i64 %2) @@ -817,6 +853,7 @@ entry: declare @llvm.riscv.vssra.nxv1i64.nxv1i64( , , + , i64); define @intrinsic_vssra_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { @@ -827,6 +864,7 @@ define @intrinsic_vssra_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vssra.nxv1i64.nxv1i64( + undef, %0, %1, 
i64 %2) @@ -862,6 +900,7 @@ entry: declare @llvm.riscv.vssra.nxv2i64.nxv2i64( , , + , i64); define @intrinsic_vssra_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { @@ -872,6 +911,7 @@ define @intrinsic_vssra_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vssra.nxv2i64.nxv2i64( + undef, %0, %1, i64 %2) @@ -907,6 +947,7 @@ entry: declare @llvm.riscv.vssra.nxv4i64.nxv4i64( , , + , i64); define @intrinsic_vssra_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { @@ -917,6 +958,7 @@ define @intrinsic_vssra_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vssra.nxv4i64.nxv4i64( + undef, %0, %1, i64 %2) @@ -952,6 +994,7 @@ entry: declare @llvm.riscv.vssra.nxv8i64.nxv8i64( , , + , i64); define @intrinsic_vssra_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { @@ -962,6 +1005,7 @@ define @intrinsic_vssra_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vssra.nxv8i64.nxv8i64( + undef, %0, %1, i64 %2) @@ -997,6 +1041,7 @@ entry: declare @llvm.riscv.vssra.nxv1i8( , + , i64, i64); @@ -1008,6 +1053,7 @@ define @intrinsic_vssra_vx_nxv1i8_nxv1i8( %0, ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv1i8( + undef, %0, i64 %1, i64 %2) @@ -1042,6 +1088,7 @@ entry: declare @llvm.riscv.vssra.nxv2i8( , + , i64, i64); @@ -1053,6 +1100,7 @@ define @intrinsic_vssra_vx_nxv2i8_nxv2i8( %0, ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv2i8( + undef, %0, i64 %1, i64 %2) @@ -1087,6 +1135,7 @@ entry: declare @llvm.riscv.vssra.nxv4i8( , + , i64, i64); @@ -1098,6 +1147,7 @@ define @intrinsic_vssra_vx_nxv4i8_nxv4i8( %0, ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv4i8( + undef, %0, i64 %1, i64 %2) @@ -1132,6 +1182,7 @@ entry: declare @llvm.riscv.vssra.nxv8i8( , + , i64, i64); @@ -1143,6 +1194,7 @@ define @intrinsic_vssra_vx_nxv8i8_nxv8i8( %0, ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv8i8( + undef, %0, i64 %1, i64 %2) @@ -1177,6 +1229,7 @@ entry: declare @llvm.riscv.vssra.nxv16i8( , + , i64, i64); @@ -1188,6 +1241,7 @@ define @intrinsic_vssra_vx_nxv16i8_nxv16i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv16i8( + undef, %0, i64 %1, i64 %2) @@ -1222,6 +1276,7 @@ entry: declare @llvm.riscv.vssra.nxv32i8( , + , i64, i64); @@ -1233,6 +1288,7 @@ define @intrinsic_vssra_vx_nxv32i8_nxv32i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv32i8( + undef, %0, i64 %1, i64 %2) @@ -1267,6 +1323,7 @@ entry: declare @llvm.riscv.vssra.nxv64i8( , + , i64, i64); @@ -1278,6 +1335,7 @@ define @intrinsic_vssra_vx_nxv64i8_nxv64i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv64i8( + undef, %0, i64 %1, i64 %2) @@ -1312,6 +1370,7 @@ entry: declare @llvm.riscv.vssra.nxv1i16( , + , i64, i64); @@ -1323,6 +1382,7 @@ define @intrinsic_vssra_vx_nxv1i16_nxv1i16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv1i16( + undef, %0, i64 %1, i64 %2) @@ -1357,6 +1417,7 @@ entry: declare @llvm.riscv.vssra.nxv2i16( , + , i64, i64); @@ -1368,6 +1429,7 @@ define @intrinsic_vssra_vx_nxv2i16_nxv2i16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv2i16( + undef, %0, i64 %1, i64 %2) @@ -1402,6 +1464,7 @@ entry: declare @llvm.riscv.vssra.nxv4i16( , + , i64, i64); @@ -1413,6 +1476,7 @@ define @intrinsic_vssra_vx_nxv4i16_nxv4i16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv4i16( + undef, %0, i64 %1, i64 %2) @@ -1447,6 +1511,7 @@ entry: declare @llvm.riscv.vssra.nxv8i16( , + , i64, i64); @@ -1458,6 +1523,7 @@ define @intrinsic_vssra_vx_nxv8i16_nxv8i16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv8i16( + undef, %0, i64 %1, i64 %2) @@ -1492,6 +1558,7 @@ entry: declare 
@llvm.riscv.vssra.nxv16i16( , + , i64, i64); @@ -1503,6 +1570,7 @@ define @intrinsic_vssra_vx_nxv16i16_nxv16i16( @llvm.riscv.vssra.nxv16i16( + undef, %0, i64 %1, i64 %2) @@ -1537,6 +1605,7 @@ entry: declare @llvm.riscv.vssra.nxv32i16( , + , i64, i64); @@ -1548,6 +1617,7 @@ define @intrinsic_vssra_vx_nxv32i16_nxv32i16( @llvm.riscv.vssra.nxv32i16( + undef, %0, i64 %1, i64 %2) @@ -1582,6 +1652,7 @@ entry: declare @llvm.riscv.vssra.nxv1i32( , + , i64, i64); @@ -1593,6 +1664,7 @@ define @intrinsic_vssra_vx_nxv1i32_nxv1i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv1i32( + undef, %0, i64 %1, i64 %2) @@ -1627,6 +1699,7 @@ entry: declare @llvm.riscv.vssra.nxv2i32( , + , i64, i64); @@ -1638,6 +1711,7 @@ define @intrinsic_vssra_vx_nxv2i32_nxv2i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv2i32( + undef, %0, i64 %1, i64 %2) @@ -1672,6 +1746,7 @@ entry: declare @llvm.riscv.vssra.nxv4i32( , + , i64, i64); @@ -1683,6 +1758,7 @@ define @intrinsic_vssra_vx_nxv4i32_nxv4i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv4i32( + undef, %0, i64 %1, i64 %2) @@ -1717,6 +1793,7 @@ entry: declare @llvm.riscv.vssra.nxv8i32( , + , i64, i64); @@ -1728,6 +1805,7 @@ define @intrinsic_vssra_vx_nxv8i32_nxv8i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv8i32( + undef, %0, i64 %1, i64 %2) @@ -1762,6 +1840,7 @@ entry: declare @llvm.riscv.vssra.nxv16i32( , + , i64, i64); @@ -1773,6 +1852,7 @@ define @intrinsic_vssra_vx_nxv16i32_nxv16i32( @llvm.riscv.vssra.nxv16i32( + undef, %0, i64 %1, i64 %2) @@ -1807,6 +1887,7 @@ entry: declare @llvm.riscv.vssra.nxv1i64( , + , i64, i64); @@ -1818,6 +1899,7 @@ define @intrinsic_vssra_vx_nxv1i64_nxv1i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv1i64( + undef, %0, i64 %1, i64 %2) @@ -1852,6 +1934,7 @@ entry: declare @llvm.riscv.vssra.nxv2i64( , + , i64, i64); @@ -1863,6 +1946,7 @@ define @intrinsic_vssra_vx_nxv2i64_nxv2i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv2i64( + undef, %0, i64 %1, i64 %2) @@ -1897,6 +1981,7 @@ entry: declare @llvm.riscv.vssra.nxv4i64( , + , i64, i64); @@ -1908,6 +1993,7 @@ define @intrinsic_vssra_vx_nxv4i64_nxv4i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv4i64( + undef, %0, i64 %1, i64 %2) @@ -1942,6 +2028,7 @@ entry: declare @llvm.riscv.vssra.nxv8i64( , + , i64, i64); @@ -1953,6 +2040,7 @@ define @intrinsic_vssra_vx_nxv8i64_nxv8i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv8i64( + undef, %0, i64 %1, i64 %2) @@ -1993,6 +2081,7 @@ define @intrinsic_vssra_vi_nxv1i8_nxv1i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv1i8( + undef, %0, i64 9, i64 %1) @@ -2025,6 +2114,7 @@ define @intrinsic_vssra_vi_nxv2i8_nxv2i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv2i8( + undef, %0, i64 9, i64 %1) @@ -2057,6 +2147,7 @@ define @intrinsic_vssra_vi_nxv4i8_nxv4i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv4i8( + undef, %0, i64 9, i64 %1) @@ -2089,6 +2180,7 @@ define @intrinsic_vssra_vi_nxv8i8_nxv8i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv8i8( + undef, %0, i64 9, i64 %1) @@ -2121,6 +2213,7 @@ define @intrinsic_vssra_vi_nxv16i8_nxv16i8_i8( @llvm.riscv.vssra.nxv16i8( + undef, %0, i64 9, i64 %1) @@ -2153,6 +2246,7 @@ define @intrinsic_vssra_vi_nxv32i8_nxv32i8_i8( @llvm.riscv.vssra.nxv32i8( + undef, %0, i64 9, i64 %1) @@ -2185,6 +2279,7 @@ define @intrinsic_vssra_vi_nxv64i8_nxv64i8_i8( @llvm.riscv.vssra.nxv64i8( + undef, %0, i64 9, i64 %1) @@ -2217,6 +2312,7 @@ define @intrinsic_vssra_vi_nxv1i16_nxv1i16_i16( 
@llvm.riscv.vssra.nxv1i16( + undef, %0, i64 9, i64 %1) @@ -2249,6 +2345,7 @@ define @intrinsic_vssra_vi_nxv2i16_nxv2i16_i16( @llvm.riscv.vssra.nxv2i16( + undef, %0, i64 9, i64 %1) @@ -2281,6 +2378,7 @@ define @intrinsic_vssra_vi_nxv4i16_nxv4i16_i16( @llvm.riscv.vssra.nxv4i16( + undef, %0, i64 9, i64 %1) @@ -2313,6 +2411,7 @@ define @intrinsic_vssra_vi_nxv8i16_nxv8i16_i16( @llvm.riscv.vssra.nxv8i16( + undef, %0, i64 9, i64 %1) @@ -2345,6 +2444,7 @@ define @intrinsic_vssra_vi_nxv16i16_nxv16i16_i16( @llvm.riscv.vssra.nxv16i16( + undef, %0, i64 9, i64 %1) @@ -2377,6 +2477,7 @@ define @intrinsic_vssra_vi_nxv32i16_nxv32i16_i16( @llvm.riscv.vssra.nxv32i16( + undef, %0, i64 9, i64 %1) @@ -2409,6 +2510,7 @@ define @intrinsic_vssra_vi_nxv1i32_nxv1i32_i32( @llvm.riscv.vssra.nxv1i32( + undef, %0, i64 9, i64 %1) @@ -2441,6 +2543,7 @@ define @intrinsic_vssra_vi_nxv2i32_nxv2i32_i32( @llvm.riscv.vssra.nxv2i32( + undef, %0, i64 9, i64 %1) @@ -2473,6 +2576,7 @@ define @intrinsic_vssra_vi_nxv4i32_nxv4i32_i32( @llvm.riscv.vssra.nxv4i32( + undef, %0, i64 9, i64 %1) @@ -2505,6 +2609,7 @@ define @intrinsic_vssra_vi_nxv8i32_nxv8i32_i32( @llvm.riscv.vssra.nxv8i32( + undef, %0, i64 9, i64 %1) @@ -2537,6 +2642,7 @@ define @intrinsic_vssra_vi_nxv16i32_nxv16i32_i32( @llvm.riscv.vssra.nxv16i32( + undef, %0, i64 9, i64 %1) @@ -2569,6 +2675,7 @@ define @intrinsic_vssra_vi_nxv1i64_nxv1i64_i64( @llvm.riscv.vssra.nxv1i64( + undef, %0, i64 9, i64 %1) @@ -2601,6 +2708,7 @@ define @intrinsic_vssra_vi_nxv2i64_nxv2i64_i64( @llvm.riscv.vssra.nxv2i64( + undef, %0, i64 9, i64 %1) @@ -2633,6 +2741,7 @@ define @intrinsic_vssra_vi_nxv4i64_nxv4i64_i64( @llvm.riscv.vssra.nxv4i64( + undef, %0, i64 9, i64 %1) @@ -2665,6 +2774,7 @@ define @intrinsic_vssra_vi_nxv8i64_nxv8i64_i64( @llvm.riscv.vssra.nxv8i64( + undef, %0, i64 9, i64 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll index 173f2f8..bac9d51 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll @@ -4,6 +4,7 @@ declare @llvm.riscv.vssrl.nxv1i8.nxv1i8( , , + , i32); define @intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { @@ -14,6 +15,7 @@ define @intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vssrl.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -49,6 +51,7 @@ entry: declare @llvm.riscv.vssrl.nxv2i8.nxv2i8( , , + , i32); define @intrinsic_vssrl_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { @@ -59,6 +62,7 @@ define @intrinsic_vssrl_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vssrl.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -94,6 +98,7 @@ entry: declare @llvm.riscv.vssrl.nxv4i8.nxv4i8( , , + , i32); define @intrinsic_vssrl_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { @@ -104,6 +109,7 @@ define @intrinsic_vssrl_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vssrl.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -139,6 +145,7 @@ entry: declare @llvm.riscv.vssrl.nxv8i8.nxv8i8( , , + , i32); define @intrinsic_vssrl_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { @@ -149,6 +156,7 @@ define @intrinsic_vssrl_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vssrl.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -184,6 +192,7 @@ entry: declare @llvm.riscv.vssrl.nxv16i8.nxv16i8( , , + , i32); define @intrinsic_vssrl_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { @@ -194,6 +203,7 @@ define @intrinsic_vssrl_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vssrl.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -229,6 +239,7 @@ entry: declare @llvm.riscv.vssrl.nxv32i8.nxv32i8( , , + , i32); define 
@intrinsic_vssrl_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { @@ -239,6 +250,7 @@ define @intrinsic_vssrl_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vssrl.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -274,6 +286,7 @@ entry: declare @llvm.riscv.vssrl.nxv64i8.nxv64i8( , , + , i32); define @intrinsic_vssrl_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { @@ -284,6 +297,7 @@ define @intrinsic_vssrl_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vssrl.nxv64i8.nxv64i8( + undef, %0, %1, i32 %2) @@ -320,6 +334,7 @@ entry: declare @llvm.riscv.vssrl.nxv1i16.nxv1i16( , , + , i32); define @intrinsic_vssrl_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { @@ -330,6 +345,7 @@ define @intrinsic_vssrl_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vssrl.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -365,6 +381,7 @@ entry: declare @llvm.riscv.vssrl.nxv2i16.nxv2i16( , , + , i32); define @intrinsic_vssrl_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { @@ -375,6 +392,7 @@ define @intrinsic_vssrl_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vssrl.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -410,6 +428,7 @@ entry: declare @llvm.riscv.vssrl.nxv4i16.nxv4i16( , , + , i32); define @intrinsic_vssrl_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { @@ -420,6 +439,7 @@ define @intrinsic_vssrl_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vssrl.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -455,6 +475,7 @@ entry: declare @llvm.riscv.vssrl.nxv8i16.nxv8i16( , , + , i32); define @intrinsic_vssrl_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { @@ -465,6 +486,7 @@ define @intrinsic_vssrl_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vssrl.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vssrl.nxv16i16.nxv16i16( , , + , i32); define @intrinsic_vssrl_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { @@ -510,6 +533,7 @@ define @intrinsic_vssrl_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vssrl.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vssrl.nxv32i16.nxv32i16( , , + , i32); define @intrinsic_vssrl_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { @@ -555,6 +580,7 @@ define @intrinsic_vssrl_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vssrl.nxv32i16.nxv32i16( + undef, %0, %1, i32 %2) @@ -591,6 +617,7 @@ entry: declare @llvm.riscv.vssrl.nxv1i32.nxv1i32( , , + , i32); define @intrinsic_vssrl_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { @@ -601,6 +628,7 @@ define @intrinsic_vssrl_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vssrl.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -636,6 +664,7 @@ entry: declare @llvm.riscv.vssrl.nxv2i32.nxv2i32( , , + , i32); define @intrinsic_vssrl_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { @@ -646,6 +675,7 @@ define @intrinsic_vssrl_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vssrl.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vssrl.nxv4i32.nxv4i32( , , + , i32); define @intrinsic_vssrl_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { @@ -691,6 +722,7 @@ define @intrinsic_vssrl_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vssrl.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vssrl.nxv8i32.nxv8i32( , , + , i32); define @intrinsic_vssrl_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { @@ -736,6 +769,7 @@ define @intrinsic_vssrl_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vssrl.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vssrl.nxv16i32.nxv16i32( , , + , i32); 
define @intrinsic_vssrl_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { @@ -781,6 +816,7 @@ define @intrinsic_vssrl_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vssrl.nxv16i32.nxv16i32( + undef, %0, %1, i32 %2) @@ -816,6 +852,7 @@ entry: declare @llvm.riscv.vssrl.nxv1i8( , + , i32, i32); @@ -827,6 +864,7 @@ define @intrinsic_vssrl_vx_nxv1i8_nxv1i8( %0, ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv1i8( + undef, %0, i32 %1, i32 %2) @@ -861,6 +899,7 @@ entry: declare @llvm.riscv.vssrl.nxv2i8( , + , i32, i32); @@ -872,6 +911,7 @@ define @intrinsic_vssrl_vx_nxv2i8_nxv2i8( %0, ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv2i8( + undef, %0, i32 %1, i32 %2) @@ -906,6 +946,7 @@ entry: declare @llvm.riscv.vssrl.nxv4i8( , + , i32, i32); @@ -917,6 +958,7 @@ define @intrinsic_vssrl_vx_nxv4i8_nxv4i8( %0, ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv4i8( + undef, %0, i32 %1, i32 %2) @@ -951,6 +993,7 @@ entry: declare @llvm.riscv.vssrl.nxv8i8( , + , i32, i32); @@ -962,6 +1005,7 @@ define @intrinsic_vssrl_vx_nxv8i8_nxv8i8( %0, ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv8i8( + undef, %0, i32 %1, i32 %2) @@ -996,6 +1040,7 @@ entry: declare @llvm.riscv.vssrl.nxv16i8( , + , i32, i32); @@ -1007,6 +1052,7 @@ define @intrinsic_vssrl_vx_nxv16i8_nxv16i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv16i8( + undef, %0, i32 %1, i32 %2) @@ -1041,6 +1087,7 @@ entry: declare @llvm.riscv.vssrl.nxv32i8( , + , i32, i32); @@ -1052,6 +1099,7 @@ define @intrinsic_vssrl_vx_nxv32i8_nxv32i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv32i8( + undef, %0, i32 %1, i32 %2) @@ -1086,6 +1134,7 @@ entry: declare @llvm.riscv.vssrl.nxv64i8( , + , i32, i32); @@ -1097,6 +1146,7 @@ define @intrinsic_vssrl_vx_nxv64i8_nxv64i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv64i8( + undef, %0, i32 %1, i32 %2) @@ -1131,6 +1181,7 @@ entry: declare @llvm.riscv.vssrl.nxv1i16( , + , i32, i32); @@ -1142,6 +1193,7 @@ define @intrinsic_vssrl_vx_nxv1i16_nxv1i16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv1i16( + undef, %0, i32 %1, i32 %2) @@ -1176,6 +1228,7 @@ entry: declare @llvm.riscv.vssrl.nxv2i16( , + , i32, i32); @@ -1187,6 +1240,7 @@ define @intrinsic_vssrl_vx_nxv2i16_nxv2i16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv2i16( + undef, %0, i32 %1, i32 %2) @@ -1221,6 +1275,7 @@ entry: declare @llvm.riscv.vssrl.nxv4i16( , + , i32, i32); @@ -1232,6 +1287,7 @@ define @intrinsic_vssrl_vx_nxv4i16_nxv4i16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv4i16( + undef, %0, i32 %1, i32 %2) @@ -1266,6 +1322,7 @@ entry: declare @llvm.riscv.vssrl.nxv8i16( , + , i32, i32); @@ -1277,6 +1334,7 @@ define @intrinsic_vssrl_vx_nxv8i16_nxv8i16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv8i16( + undef, %0, i32 %1, i32 %2) @@ -1311,6 +1369,7 @@ entry: declare @llvm.riscv.vssrl.nxv16i16( , + , i32, i32); @@ -1322,6 +1381,7 @@ define @intrinsic_vssrl_vx_nxv16i16_nxv16i16( @llvm.riscv.vssrl.nxv16i16( + undef, %0, i32 %1, i32 %2) @@ -1356,6 +1416,7 @@ entry: declare @llvm.riscv.vssrl.nxv32i16( , + , i32, i32); @@ -1367,6 +1428,7 @@ define @intrinsic_vssrl_vx_nxv32i16_nxv32i16( @llvm.riscv.vssrl.nxv32i16( + undef, %0, i32 %1, i32 %2) @@ -1401,6 +1463,7 @@ entry: declare @llvm.riscv.vssrl.nxv1i32( , + , i32, i32); @@ -1412,6 +1475,7 @@ define @intrinsic_vssrl_vx_nxv1i32_nxv1i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv1i32( + undef, %0, i32 %1, i32 %2) @@ -1446,6 +1510,7 @@ entry: declare @llvm.riscv.vssrl.nxv2i32( , + , i32, i32); 
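The vector-scalar (vx) hunks follow the same shape. A minimal sketch for the nxv1i8 case on rv32, again with the element types assumed; the scalar shift amount and vl are XLEN-sized integers here:

declare <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8(
  <vscale x 1 x i8>,  ; passthru (these tests pass undef)
  <vscale x 1 x i8>,  ; source operand
  i32,                ; scalar shift amount
  i32);               ; vl

define <vscale x 1 x i8> @intrinsic_vssrl_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, i32 %1, i32 %2) nounwind {
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    i32 %1,
    i32 %2)
  ret <vscale x 1 x i8> %a
}

The vi hunks later in the file reuse the same declaration with a constant shift amount (i32 9) and likewise pass undef as the new first operand.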
@@ -1457,6 +1522,7 @@ define @intrinsic_vssrl_vx_nxv2i32_nxv2i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv2i32( + undef, %0, i32 %1, i32 %2) @@ -1491,6 +1557,7 @@ entry: declare @llvm.riscv.vssrl.nxv4i32( , + , i32, i32); @@ -1502,6 +1569,7 @@ define @intrinsic_vssrl_vx_nxv4i32_nxv4i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv4i32( + undef, %0, i32 %1, i32 %2) @@ -1536,6 +1604,7 @@ entry: declare @llvm.riscv.vssrl.nxv8i32( , + , i32, i32); @@ -1547,6 +1616,7 @@ define @intrinsic_vssrl_vx_nxv8i32_nxv8i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv8i32( + undef, %0, i32 %1, i32 %2) @@ -1581,6 +1651,7 @@ entry: declare @llvm.riscv.vssrl.nxv16i32( , + , i32, i32); @@ -1592,6 +1663,7 @@ define @intrinsic_vssrl_vx_nxv16i32_nxv16i32( @llvm.riscv.vssrl.nxv16i32( + undef, %0, i32 %1, i32 %2) @@ -1626,6 +1698,7 @@ entry: declare @llvm.riscv.vssrl.nxv1i64( , + , i32, i32); @@ -1637,6 +1710,7 @@ define @intrinsic_vssrl_vx_nxv1i64_nxv1i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv1i64( + undef, %0, i32 %1, i32 %2) @@ -1671,6 +1745,7 @@ entry: declare @llvm.riscv.vssrl.nxv2i64( , + , i32, i32); @@ -1682,6 +1757,7 @@ define @intrinsic_vssrl_vx_nxv2i64_nxv2i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv2i64( + undef, %0, i32 %1, i32 %2) @@ -1716,6 +1792,7 @@ entry: declare @llvm.riscv.vssrl.nxv4i64( , + , i32, i32); @@ -1727,6 +1804,7 @@ define @intrinsic_vssrl_vx_nxv4i64_nxv4i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv4i64( + undef, %0, i32 %1, i32 %2) @@ -1761,6 +1839,7 @@ entry: declare @llvm.riscv.vssrl.nxv8i64( , + , i32, i32); @@ -1772,6 +1851,7 @@ define @intrinsic_vssrl_vx_nxv8i64_nxv8i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv8i64( + undef, %0, i32 %1, i32 %2) @@ -1812,6 +1892,7 @@ define @intrinsic_vssrl_vi_nxv1i8_nxv1i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv1i8( + undef, %0, i32 9, i32 %1) @@ -1844,6 +1925,7 @@ define @intrinsic_vssrl_vi_nxv2i8_nxv2i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv2i8( + undef, %0, i32 9, i32 %1) @@ -1876,6 +1958,7 @@ define @intrinsic_vssrl_vi_nxv4i8_nxv4i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv4i8( + undef, %0, i32 9, i32 %1) @@ -1908,6 +1991,7 @@ define @intrinsic_vssrl_vi_nxv8i8_nxv8i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv8i8( + undef, %0, i32 9, i32 %1) @@ -1940,6 +2024,7 @@ define @intrinsic_vssrl_vi_nxv16i8_nxv16i8_i8( @llvm.riscv.vssrl.nxv16i8( + undef, %0, i32 9, i32 %1) @@ -1972,6 +2057,7 @@ define @intrinsic_vssrl_vi_nxv32i8_nxv32i8_i8( @llvm.riscv.vssrl.nxv32i8( + undef, %0, i32 9, i32 %1) @@ -2004,6 +2090,7 @@ define @intrinsic_vssrl_vi_nxv64i8_nxv64i8_i8( @llvm.riscv.vssrl.nxv64i8( + undef, %0, i32 9, i32 %1) @@ -2036,6 +2123,7 @@ define @intrinsic_vssrl_vi_nxv1i16_nxv1i16_i16( @llvm.riscv.vssrl.nxv1i16( + undef, %0, i32 9, i32 %1) @@ -2068,6 +2156,7 @@ define @intrinsic_vssrl_vi_nxv2i16_nxv2i16_i16( @llvm.riscv.vssrl.nxv2i16( + undef, %0, i32 9, i32 %1) @@ -2100,6 +2189,7 @@ define @intrinsic_vssrl_vi_nxv4i16_nxv4i16_i16( @llvm.riscv.vssrl.nxv4i16( + undef, %0, i32 9, i32 %1) @@ -2132,6 +2222,7 @@ define @intrinsic_vssrl_vi_nxv8i16_nxv8i16_i16( @llvm.riscv.vssrl.nxv8i16( + undef, %0, i32 9, i32 %1) @@ -2164,6 +2255,7 @@ define @intrinsic_vssrl_vi_nxv16i16_nxv16i16_i16( @llvm.riscv.vssrl.nxv16i16( + undef, %0, i32 9, i32 %1) @@ -2196,6 +2288,7 @@ define @intrinsic_vssrl_vi_nxv32i16_nxv32i16_i16( @llvm.riscv.vssrl.nxv32i16( + undef, %0, i32 9, i32 %1) @@ -2228,6 
+2321,7 @@ define @intrinsic_vssrl_vi_nxv1i32_nxv1i32_i32( @llvm.riscv.vssrl.nxv1i32( + undef, %0, i32 9, i32 %1) @@ -2260,6 +2354,7 @@ define @intrinsic_vssrl_vi_nxv2i32_nxv2i32_i32( @llvm.riscv.vssrl.nxv2i32( + undef, %0, i32 9, i32 %1) @@ -2292,6 +2387,7 @@ define @intrinsic_vssrl_vi_nxv4i32_nxv4i32_i32( @llvm.riscv.vssrl.nxv4i32( + undef, %0, i32 9, i32 %1) @@ -2324,6 +2420,7 @@ define @intrinsic_vssrl_vi_nxv8i32_nxv8i32_i32( @llvm.riscv.vssrl.nxv8i32( + undef, %0, i32 9, i32 %1) @@ -2356,6 +2453,7 @@ define @intrinsic_vssrl_vi_nxv16i32_nxv16i32_i32( @llvm.riscv.vssrl.nxv16i32( + undef, %0, i32 9, i32 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll index 3bc2f68..98da679 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll @@ -4,6 +4,7 @@ declare @llvm.riscv.vssrl.nxv1i8.nxv1i8( , , + , i64); define @intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { @@ -14,6 +15,7 @@ define @intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vssrl.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -49,6 +51,7 @@ entry: declare @llvm.riscv.vssrl.nxv2i8.nxv2i8( , , + , i64); define @intrinsic_vssrl_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { @@ -59,6 +62,7 @@ define @intrinsic_vssrl_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vssrl.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -94,6 +98,7 @@ entry: declare @llvm.riscv.vssrl.nxv4i8.nxv4i8( , , + , i64); define @intrinsic_vssrl_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { @@ -104,6 +109,7 @@ define @intrinsic_vssrl_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vssrl.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -139,6 +145,7 @@ entry: declare @llvm.riscv.vssrl.nxv8i8.nxv8i8( , , + , i64); define @intrinsic_vssrl_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { @@ -149,6 +156,7 @@ define @intrinsic_vssrl_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vssrl.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -184,6 +192,7 @@ entry: declare @llvm.riscv.vssrl.nxv16i8.nxv16i8( , , + , i64); define @intrinsic_vssrl_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { @@ -194,6 +203,7 @@ define @intrinsic_vssrl_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vssrl.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -229,6 +239,7 @@ entry: declare @llvm.riscv.vssrl.nxv32i8.nxv32i8( , , + , i64); define @intrinsic_vssrl_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { @@ -239,6 +250,7 @@ define @intrinsic_vssrl_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vssrl.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -274,6 +286,7 @@ entry: declare @llvm.riscv.vssrl.nxv64i8.nxv64i8( , , + , i64); define @intrinsic_vssrl_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { @@ -284,6 +297,7 @@ define @intrinsic_vssrl_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vssrl.nxv64i8.nxv64i8( + undef, %0, %1, i64 %2) @@ -320,6 +334,7 @@ entry: declare @llvm.riscv.vssrl.nxv1i16.nxv1i16( , , + , i64); define @intrinsic_vssrl_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { @@ -330,6 +345,7 @@ define @intrinsic_vssrl_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vssrl.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -365,6 +381,7 @@ entry: declare @llvm.riscv.vssrl.nxv2i16.nxv2i16( , , + , i64); define @intrinsic_vssrl_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { @@ -375,6 +392,7 @@ define @intrinsic_vssrl_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vssrl.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -410,6 +428,7 @@ entry: declare @llvm.riscv.vssrl.nxv4i16.nxv4i16( , , + , i64); define 
@intrinsic_vssrl_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { @@ -420,6 +439,7 @@ define @intrinsic_vssrl_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vssrl.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -455,6 +475,7 @@ entry: declare @llvm.riscv.vssrl.nxv8i16.nxv8i16( , , + , i64); define @intrinsic_vssrl_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { @@ -465,6 +486,7 @@ define @intrinsic_vssrl_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vssrl.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vssrl.nxv16i16.nxv16i16( , , + , i64); define @intrinsic_vssrl_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { @@ -510,6 +533,7 @@ define @intrinsic_vssrl_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vssrl.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vssrl.nxv32i16.nxv32i16( , , + , i64); define @intrinsic_vssrl_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { @@ -555,6 +580,7 @@ define @intrinsic_vssrl_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vssrl.nxv32i16.nxv32i16( + undef, %0, %1, i64 %2) @@ -591,6 +617,7 @@ entry: declare @llvm.riscv.vssrl.nxv1i32.nxv1i32( , , + , i64); define @intrinsic_vssrl_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { @@ -601,6 +628,7 @@ define @intrinsic_vssrl_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vssrl.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -636,6 +664,7 @@ entry: declare @llvm.riscv.vssrl.nxv2i32.nxv2i32( , , + , i64); define @intrinsic_vssrl_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { @@ -646,6 +675,7 @@ define @intrinsic_vssrl_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vssrl.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vssrl.nxv4i32.nxv4i32( , , + , i64); define @intrinsic_vssrl_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { @@ -691,6 +722,7 @@ define @intrinsic_vssrl_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vssrl.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vssrl.nxv8i32.nxv8i32( , , + , i64); define @intrinsic_vssrl_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { @@ -736,6 +769,7 @@ define @intrinsic_vssrl_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vssrl.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vssrl.nxv16i32.nxv16i32( , , + , i64); define @intrinsic_vssrl_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { @@ -781,6 +816,7 @@ define @intrinsic_vssrl_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vssrl.nxv16i32.nxv16i32( + undef, %0, %1, i64 %2) @@ -817,6 +853,7 @@ entry: declare @llvm.riscv.vssrl.nxv1i64.nxv1i64( , , + , i64); define @intrinsic_vssrl_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { @@ -827,6 +864,7 @@ define @intrinsic_vssrl_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vssrl.nxv1i64.nxv1i64( + undef, %0, %1, i64 %2) @@ -862,6 +900,7 @@ entry: declare @llvm.riscv.vssrl.nxv2i64.nxv2i64( , , + , i64); define @intrinsic_vssrl_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { @@ -872,6 +911,7 @@ define @intrinsic_vssrl_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vssrl.nxv2i64.nxv2i64( + undef, %0, %1, i64 %2) @@ -907,6 +947,7 @@ entry: declare @llvm.riscv.vssrl.nxv4i64.nxv4i64( , , + , i64); define @intrinsic_vssrl_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { @@ -917,6 +958,7 @@ define @intrinsic_vssrl_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vssrl.nxv4i64.nxv4i64( + undef, %0, %1, i64 %2) @@ -952,6 +994,7 @@ entry: declare @llvm.riscv.vssrl.nxv8i64.nxv8i64( , , + 
, i64); define @intrinsic_vssrl_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { @@ -962,6 +1005,7 @@ define @intrinsic_vssrl_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vssrl.nxv8i64.nxv8i64( + undef, %0, %1, i64 %2) @@ -997,6 +1041,7 @@ entry: declare @llvm.riscv.vssrl.nxv1i8( , + , i64, i64); @@ -1008,6 +1053,7 @@ define @intrinsic_vssrl_vx_nxv1i8_nxv1i8( %0, ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv1i8( + undef, %0, i64 %1, i64 %2) @@ -1042,6 +1088,7 @@ entry: declare @llvm.riscv.vssrl.nxv2i8( , + , i64, i64); @@ -1053,6 +1100,7 @@ define @intrinsic_vssrl_vx_nxv2i8_nxv2i8( %0, ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv2i8( + undef, %0, i64 %1, i64 %2) @@ -1087,6 +1135,7 @@ entry: declare @llvm.riscv.vssrl.nxv4i8( , + , i64, i64); @@ -1098,6 +1147,7 @@ define @intrinsic_vssrl_vx_nxv4i8_nxv4i8( %0, ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv4i8( + undef, %0, i64 %1, i64 %2) @@ -1132,6 +1182,7 @@ entry: declare @llvm.riscv.vssrl.nxv8i8( , + , i64, i64); @@ -1143,6 +1194,7 @@ define @intrinsic_vssrl_vx_nxv8i8_nxv8i8( %0, ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv8i8( + undef, %0, i64 %1, i64 %2) @@ -1177,6 +1229,7 @@ entry: declare @llvm.riscv.vssrl.nxv16i8( , + , i64, i64); @@ -1188,6 +1241,7 @@ define @intrinsic_vssrl_vx_nxv16i8_nxv16i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv16i8( + undef, %0, i64 %1, i64 %2) @@ -1222,6 +1276,7 @@ entry: declare @llvm.riscv.vssrl.nxv32i8( , + , i64, i64); @@ -1233,6 +1288,7 @@ define @intrinsic_vssrl_vx_nxv32i8_nxv32i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv32i8( + undef, %0, i64 %1, i64 %2) @@ -1267,6 +1323,7 @@ entry: declare @llvm.riscv.vssrl.nxv64i8( , + , i64, i64); @@ -1278,6 +1335,7 @@ define @intrinsic_vssrl_vx_nxv64i8_nxv64i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv64i8( + undef, %0, i64 %1, i64 %2) @@ -1312,6 +1370,7 @@ entry: declare @llvm.riscv.vssrl.nxv1i16( , + , i64, i64); @@ -1323,6 +1382,7 @@ define @intrinsic_vssrl_vx_nxv1i16_nxv1i16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv1i16( + undef, %0, i64 %1, i64 %2) @@ -1357,6 +1417,7 @@ entry: declare @llvm.riscv.vssrl.nxv2i16( , + , i64, i64); @@ -1368,6 +1429,7 @@ define @intrinsic_vssrl_vx_nxv2i16_nxv2i16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv2i16( + undef, %0, i64 %1, i64 %2) @@ -1402,6 +1464,7 @@ entry: declare @llvm.riscv.vssrl.nxv4i16( , + , i64, i64); @@ -1413,6 +1476,7 @@ define @intrinsic_vssrl_vx_nxv4i16_nxv4i16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv4i16( + undef, %0, i64 %1, i64 %2) @@ -1447,6 +1511,7 @@ entry: declare @llvm.riscv.vssrl.nxv8i16( , + , i64, i64); @@ -1458,6 +1523,7 @@ define @intrinsic_vssrl_vx_nxv8i16_nxv8i16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv8i16( + undef, %0, i64 %1, i64 %2) @@ -1492,6 +1558,7 @@ entry: declare @llvm.riscv.vssrl.nxv16i16( , + , i64, i64); @@ -1503,6 +1570,7 @@ define @intrinsic_vssrl_vx_nxv16i16_nxv16i16( @llvm.riscv.vssrl.nxv16i16( + undef, %0, i64 %1, i64 %2) @@ -1537,6 +1605,7 @@ entry: declare @llvm.riscv.vssrl.nxv32i16( , + , i64, i64); @@ -1548,6 +1617,7 @@ define @intrinsic_vssrl_vx_nxv32i16_nxv32i16( @llvm.riscv.vssrl.nxv32i16( + undef, %0, i64 %1, i64 %2) @@ -1582,6 +1652,7 @@ entry: declare @llvm.riscv.vssrl.nxv1i32( , + , i64, i64); @@ -1593,6 +1664,7 @@ define @intrinsic_vssrl_vx_nxv1i32_nxv1i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv1i32( + undef, %0, i64 %1, i64 %2) @@ -1627,6 +1699,7 @@ entry: declare @llvm.riscv.vssrl.nxv2i32( , 
+ , i64, i64); @@ -1638,6 +1711,7 @@ define @intrinsic_vssrl_vx_nxv2i32_nxv2i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv2i32( + undef, %0, i64 %1, i64 %2) @@ -1672,6 +1746,7 @@ entry: declare @llvm.riscv.vssrl.nxv4i32( , + , i64, i64); @@ -1683,6 +1758,7 @@ define @intrinsic_vssrl_vx_nxv4i32_nxv4i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv4i32( + undef, %0, i64 %1, i64 %2) @@ -1717,6 +1793,7 @@ entry: declare @llvm.riscv.vssrl.nxv8i32( , + , i64, i64); @@ -1728,6 +1805,7 @@ define @intrinsic_vssrl_vx_nxv8i32_nxv8i32( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv8i32( + undef, %0, i64 %1, i64 %2) @@ -1762,6 +1840,7 @@ entry: declare @llvm.riscv.vssrl.nxv16i32( , + , i64, i64); @@ -1773,6 +1852,7 @@ define @intrinsic_vssrl_vx_nxv16i32_nxv16i32( @llvm.riscv.vssrl.nxv16i32( + undef, %0, i64 %1, i64 %2) @@ -1807,6 +1887,7 @@ entry: declare @llvm.riscv.vssrl.nxv1i64( , + , i64, i64); @@ -1818,6 +1899,7 @@ define @intrinsic_vssrl_vx_nxv1i64_nxv1i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv1i64( + undef, %0, i64 %1, i64 %2) @@ -1852,6 +1934,7 @@ entry: declare @llvm.riscv.vssrl.nxv2i64( , + , i64, i64); @@ -1863,6 +1946,7 @@ define @intrinsic_vssrl_vx_nxv2i64_nxv2i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv2i64( + undef, %0, i64 %1, i64 %2) @@ -1897,6 +1981,7 @@ entry: declare @llvm.riscv.vssrl.nxv4i64( , + , i64, i64); @@ -1908,6 +1993,7 @@ define @intrinsic_vssrl_vx_nxv4i64_nxv4i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv4i64( + undef, %0, i64 %1, i64 %2) @@ -1942,6 +2028,7 @@ entry: declare @llvm.riscv.vssrl.nxv8i64( , + , i64, i64); @@ -1953,6 +2040,7 @@ define @intrinsic_vssrl_vx_nxv8i64_nxv8i64( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv8i64( + undef, %0, i64 %1, i64 %2) @@ -1993,6 +2081,7 @@ define @intrinsic_vssrl_vi_nxv1i8_nxv1i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv1i8( + undef, %0, i64 9, i64 %1) @@ -2025,6 +2114,7 @@ define @intrinsic_vssrl_vi_nxv2i8_nxv2i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv2i8( + undef, %0, i64 9, i64 %1) @@ -2057,6 +2147,7 @@ define @intrinsic_vssrl_vi_nxv4i8_nxv4i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv4i8( + undef, %0, i64 9, i64 %1) @@ -2089,6 +2180,7 @@ define @intrinsic_vssrl_vi_nxv8i8_nxv8i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv8i8( + undef, %0, i64 9, i64 %1) @@ -2121,6 +2213,7 @@ define @intrinsic_vssrl_vi_nxv16i8_nxv16i8_i8( @llvm.riscv.vssrl.nxv16i8( + undef, %0, i64 9, i64 %1) @@ -2153,6 +2246,7 @@ define @intrinsic_vssrl_vi_nxv32i8_nxv32i8_i8( @llvm.riscv.vssrl.nxv32i8( + undef, %0, i64 9, i64 %1) @@ -2185,6 +2279,7 @@ define @intrinsic_vssrl_vi_nxv64i8_nxv64i8_i8( @llvm.riscv.vssrl.nxv64i8( + undef, %0, i64 9, i64 %1) @@ -2217,6 +2312,7 @@ define @intrinsic_vssrl_vi_nxv1i16_nxv1i16_i16( @llvm.riscv.vssrl.nxv1i16( + undef, %0, i64 9, i64 %1) @@ -2249,6 +2345,7 @@ define @intrinsic_vssrl_vi_nxv2i16_nxv2i16_i16( @llvm.riscv.vssrl.nxv2i16( + undef, %0, i64 9, i64 %1) @@ -2281,6 +2378,7 @@ define @intrinsic_vssrl_vi_nxv4i16_nxv4i16_i16( @llvm.riscv.vssrl.nxv4i16( + undef, %0, i64 9, i64 %1) @@ -2313,6 +2411,7 @@ define @intrinsic_vssrl_vi_nxv8i16_nxv8i16_i16( @llvm.riscv.vssrl.nxv8i16( + undef, %0, i64 9, i64 %1) @@ -2345,6 +2444,7 @@ define @intrinsic_vssrl_vi_nxv16i16_nxv16i16_i16( @llvm.riscv.vssrl.nxv16i16( + undef, %0, i64 9, i64 %1) @@ -2377,6 +2477,7 @@ define @intrinsic_vssrl_vi_nxv32i16_nxv32i16_i16( @llvm.riscv.vssrl.nxv32i16( + undef, %0, i64 9, i64 
%1) @@ -2409,6 +2510,7 @@ define @intrinsic_vssrl_vi_nxv1i32_nxv1i32_i32( @llvm.riscv.vssrl.nxv1i32( + undef, %0, i64 9, i64 %1) @@ -2441,6 +2543,7 @@ define @intrinsic_vssrl_vi_nxv2i32_nxv2i32_i32( @llvm.riscv.vssrl.nxv2i32( + undef, %0, i64 9, i64 %1) @@ -2473,6 +2576,7 @@ define @intrinsic_vssrl_vi_nxv4i32_nxv4i32_i32( @llvm.riscv.vssrl.nxv4i32( + undef, %0, i64 9, i64 %1) @@ -2505,6 +2609,7 @@ define @intrinsic_vssrl_vi_nxv8i32_nxv8i32_i32( @llvm.riscv.vssrl.nxv8i32( + undef, %0, i64 9, i64 %1) @@ -2537,6 +2642,7 @@ define @intrinsic_vssrl_vi_nxv16i32_nxv16i32_i32( @llvm.riscv.vssrl.nxv16i32( + undef, %0, i64 9, i64 %1) @@ -2569,6 +2675,7 @@ define @intrinsic_vssrl_vi_nxv1i64_nxv1i64_i64( @llvm.riscv.vssrl.nxv1i64( + undef, %0, i64 9, i64 %1) @@ -2601,6 +2708,7 @@ define @intrinsic_vssrl_vi_nxv2i64_nxv2i64_i64( @llvm.riscv.vssrl.nxv2i64( + undef, %0, i64 9, i64 %1) @@ -2633,6 +2741,7 @@ define @intrinsic_vssrl_vi_nxv4i64_nxv4i64_i64( @llvm.riscv.vssrl.nxv4i64( + undef, %0, i64 9, i64 %1) @@ -2665,6 +2774,7 @@ define @intrinsic_vssrl_vi_nxv8i64_nxv8i64_i64( @llvm.riscv.vssrl.nxv8i64( + undef, %0, i64 9, i64 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll index c695923..d8c6271 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll @@ -4,6 +4,7 @@ declare @llvm.riscv.vssub.nxv1i8.nxv1i8( , , + , i32); define @intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { @@ -14,6 +15,7 @@ define @intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vssub.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -49,6 +51,7 @@ entry: declare @llvm.riscv.vssub.nxv2i8.nxv2i8( , , + , i32); define @intrinsic_vssub_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { @@ -59,6 +62,7 @@ define @intrinsic_vssub_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vssub.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -94,6 +98,7 @@ entry: declare @llvm.riscv.vssub.nxv4i8.nxv4i8( , , + , i32); define @intrinsic_vssub_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { @@ -104,6 +109,7 @@ define @intrinsic_vssub_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vssub.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -139,6 +145,7 @@ entry: declare @llvm.riscv.vssub.nxv8i8.nxv8i8( , , + , i32); define @intrinsic_vssub_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { @@ -149,6 +156,7 @@ define @intrinsic_vssub_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vssub.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -184,6 +192,7 @@ entry: declare @llvm.riscv.vssub.nxv16i8.nxv16i8( , , + , i32); define @intrinsic_vssub_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { @@ -194,6 +203,7 @@ define @intrinsic_vssub_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vssub.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -229,6 +239,7 @@ entry: declare @llvm.riscv.vssub.nxv32i8.nxv32i8( , , + , i32); define @intrinsic_vssub_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { @@ -239,6 +250,7 @@ define @intrinsic_vssub_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vssub.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -274,6 +286,7 @@ entry: declare @llvm.riscv.vssub.nxv64i8.nxv64i8( , , + , i32); define @intrinsic_vssub_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { @@ -284,6 +297,7 @@ define @intrinsic_vssub_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vssub.nxv64i8.nxv64i8( + undef, %0, %1, i32 %2) @@ -320,6 +334,7 @@ entry: declare @llvm.riscv.vssub.nxv1i16.nxv1i16( , , + , i32); define @intrinsic_vssub_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { @@ -330,6 +345,7 @@ define 
@intrinsic_vssub_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vssub.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -365,6 +381,7 @@ entry: declare @llvm.riscv.vssub.nxv2i16.nxv2i16( , , + , i32); define @intrinsic_vssub_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { @@ -375,6 +392,7 @@ define @intrinsic_vssub_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vssub.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -410,6 +428,7 @@ entry: declare @llvm.riscv.vssub.nxv4i16.nxv4i16( , , + , i32); define @intrinsic_vssub_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { @@ -420,6 +439,7 @@ define @intrinsic_vssub_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vssub.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -455,6 +475,7 @@ entry: declare @llvm.riscv.vssub.nxv8i16.nxv8i16( , , + , i32); define @intrinsic_vssub_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { @@ -465,6 +486,7 @@ define @intrinsic_vssub_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vssub.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vssub.nxv16i16.nxv16i16( , , + , i32); define @intrinsic_vssub_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { @@ -510,6 +533,7 @@ define @intrinsic_vssub_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vssub.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vssub.nxv32i16.nxv32i16( , , + , i32); define @intrinsic_vssub_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { @@ -555,6 +580,7 @@ define @intrinsic_vssub_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vssub.nxv32i16.nxv32i16( + undef, %0, %1, i32 %2) @@ -591,6 +617,7 @@ entry: declare @llvm.riscv.vssub.nxv1i32.nxv1i32( , , + , i32); define @intrinsic_vssub_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { @@ -601,6 +628,7 @@ define @intrinsic_vssub_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vssub.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -636,6 +664,7 @@ entry: declare @llvm.riscv.vssub.nxv2i32.nxv2i32( , , + , i32); define @intrinsic_vssub_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { @@ -646,6 +675,7 @@ define @intrinsic_vssub_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vssub.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vssub.nxv4i32.nxv4i32( , , + , i32); define @intrinsic_vssub_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { @@ -691,6 +722,7 @@ define @intrinsic_vssub_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vssub.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vssub.nxv8i32.nxv8i32( , , + , i32); define @intrinsic_vssub_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { @@ -736,6 +769,7 @@ define @intrinsic_vssub_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vssub.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vssub.nxv16i32.nxv16i32( , , + , i32); define @intrinsic_vssub_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { @@ -781,6 +816,7 @@ define @intrinsic_vssub_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vssub.nxv16i32.nxv16i32( + undef, %0, %1, i32 %2) @@ -817,6 +853,7 @@ entry: declare @llvm.riscv.vssub.nxv1i64.nxv1i64( , , + , i32); define @intrinsic_vssub_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { @@ -827,6 +864,7 @@ define @intrinsic_vssub_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vssub.nxv1i64.nxv1i64( + undef, %0, %1, i32 %2) @@ -862,6 +900,7 @@ entry: declare @llvm.riscv.vssub.nxv2i64.nxv2i64( , , + , i32); define @intrinsic_vssub_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { @@ -872,6 
+911,7 @@ define @intrinsic_vssub_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vssub.nxv2i64.nxv2i64( + undef, %0, %1, i32 %2) @@ -907,6 +947,7 @@ entry: declare @llvm.riscv.vssub.nxv4i64.nxv4i64( , , + , i32); define @intrinsic_vssub_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { @@ -917,6 +958,7 @@ define @intrinsic_vssub_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vssub.nxv4i64.nxv4i64( + undef, %0, %1, i32 %2) @@ -952,6 +994,7 @@ entry: declare @llvm.riscv.vssub.nxv8i64.nxv8i64( , , + , i32); define @intrinsic_vssub_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { @@ -962,6 +1005,7 @@ define @intrinsic_vssub_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vssub.nxv8i64.nxv8i64( + undef, %0, %1, i32 %2) @@ -997,6 +1041,7 @@ entry: declare @llvm.riscv.vssub.nxv1i8.i8( , + , i8, i32); @@ -1008,6 +1053,7 @@ define @intrinsic_vssub_vx_nxv1i8_nxv1i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1042,6 +1088,7 @@ entry: declare @llvm.riscv.vssub.nxv2i8.i8( , + , i8, i32); @@ -1053,6 +1100,7 @@ define @intrinsic_vssub_vx_nxv2i8_nxv2i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1087,6 +1135,7 @@ entry: declare @llvm.riscv.vssub.nxv4i8.i8( , + , i8, i32); @@ -1098,6 +1147,7 @@ define @intrinsic_vssub_vx_nxv4i8_nxv4i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1132,6 +1182,7 @@ entry: declare @llvm.riscv.vssub.nxv8i8.i8( , + , i8, i32); @@ -1143,6 +1194,7 @@ define @intrinsic_vssub_vx_nxv8i8_nxv8i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1177,6 +1229,7 @@ entry: declare @llvm.riscv.vssub.nxv16i8.i8( , + , i8, i32); @@ -1188,6 +1241,7 @@ define @intrinsic_vssub_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vssub.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1222,6 +1276,7 @@ entry: declare @llvm.riscv.vssub.nxv32i8.i8( , + , i8, i32); @@ -1233,6 +1288,7 @@ define @intrinsic_vssub_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vssub.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1267,6 +1323,7 @@ entry: declare @llvm.riscv.vssub.nxv64i8.i8( , + , i8, i32); @@ -1278,6 +1335,7 @@ define @intrinsic_vssub_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vssub.nxv64i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1312,6 +1370,7 @@ entry: declare @llvm.riscv.vssub.nxv1i16.i16( , + , i16, i32); @@ -1323,6 +1382,7 @@ define @intrinsic_vssub_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vssub.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1357,6 +1417,7 @@ entry: declare @llvm.riscv.vssub.nxv2i16.i16( , + , i16, i32); @@ -1368,6 +1429,7 @@ define @intrinsic_vssub_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vssub.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1402,6 +1464,7 @@ entry: declare @llvm.riscv.vssub.nxv4i16.i16( , + , i16, i32); @@ -1413,6 +1476,7 @@ define @intrinsic_vssub_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vssub.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1447,6 +1511,7 @@ entry: declare @llvm.riscv.vssub.nxv8i16.i16( , + , i16, i32); @@ -1458,6 +1523,7 @@ define @intrinsic_vssub_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vssub.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1492,6 +1558,7 @@ entry: declare @llvm.riscv.vssub.nxv16i16.i16( , + , i16, i32); @@ -1503,6 +1570,7 @@ define @intrinsic_vssub_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vssub.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1537,6 +1605,7 @@ entry: declare @llvm.riscv.vssub.nxv32i16.i16( , + , i16, i32); @@ -1548,6 +1617,7 @@ define @intrinsic_vssub_vx_nxv32i16_nxv32i16_i16( 
@llvm.riscv.vssub.nxv32i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1582,6 +1652,7 @@ entry: declare @llvm.riscv.vssub.nxv1i32.i32( , + , i32, i32); @@ -1593,6 +1664,7 @@ define @intrinsic_vssub_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vssub.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1627,6 +1699,7 @@ entry: declare @llvm.riscv.vssub.nxv2i32.i32( , + , i32, i32); @@ -1638,6 +1711,7 @@ define @intrinsic_vssub_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vssub.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1672,6 +1746,7 @@ entry: declare @llvm.riscv.vssub.nxv4i32.i32( , + , i32, i32); @@ -1683,6 +1758,7 @@ define @intrinsic_vssub_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vssub.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1717,6 +1793,7 @@ entry: declare @llvm.riscv.vssub.nxv8i32.i32( , + , i32, i32); @@ -1728,6 +1805,7 @@ define @intrinsic_vssub_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vssub.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1762,6 +1840,7 @@ entry: declare @llvm.riscv.vssub.nxv16i32.i32( , + , i32, i32); @@ -1773,6 +1852,7 @@ define @intrinsic_vssub_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vssub.nxv16i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1807,6 +1887,7 @@ entry: declare @llvm.riscv.vssub.nxv1i64.i64( , + , i64, i32); @@ -1824,6 +1905,7 @@ define @intrinsic_vssub_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vssub.nxv1i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1864,6 +1946,7 @@ entry: declare @llvm.riscv.vssub.nxv2i64.i64( , + , i64, i32); @@ -1881,6 +1964,7 @@ define @intrinsic_vssub_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vssub.nxv2i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1921,6 +2005,7 @@ entry: declare @llvm.riscv.vssub.nxv4i64.i64( , + , i64, i32); @@ -1938,6 +2023,7 @@ define @intrinsic_vssub_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vssub.nxv4i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1978,6 +2064,7 @@ entry: declare @llvm.riscv.vssub.nxv8i64.i64( , + , i64, i32); @@ -1995,6 +2082,7 @@ define @intrinsic_vssub_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vssub.nxv8i64.i64( + undef, %0, i64 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll index bff302a..5c328c2 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll @@ -4,6 +4,7 @@ declare @llvm.riscv.vssub.nxv1i8.nxv1i8( , , + , i64); define @intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { @@ -14,6 +15,7 @@ define @intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vssub.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -49,6 +51,7 @@ entry: declare @llvm.riscv.vssub.nxv2i8.nxv2i8( , , + , i64); define @intrinsic_vssub_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { @@ -59,6 +62,7 @@ define @intrinsic_vssub_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vssub.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -94,6 +98,7 @@ entry: declare @llvm.riscv.vssub.nxv4i8.nxv4i8( , , + , i64); define @intrinsic_vssub_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { @@ -104,6 +109,7 @@ define @intrinsic_vssub_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vssub.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -139,6 +145,7 @@ entry: declare @llvm.riscv.vssub.nxv8i8.nxv8i8( , , + , i64); define @intrinsic_vssub_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { @@ -149,6 +156,7 @@ define @intrinsic_vssub_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vssub.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -184,6 +192,7 @@ entry: declare @llvm.riscv.vssub.nxv16i8.nxv16i8( , , + , i64); define @intrinsic_vssub_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { @@ -194,6 +203,7 @@ define 
@intrinsic_vssub_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vssub.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -229,6 +239,7 @@ entry: declare @llvm.riscv.vssub.nxv32i8.nxv32i8( , , + , i64); define @intrinsic_vssub_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { @@ -239,6 +250,7 @@ define @intrinsic_vssub_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vssub.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -274,6 +286,7 @@ entry: declare @llvm.riscv.vssub.nxv64i8.nxv64i8( , , + , i64); define @intrinsic_vssub_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { @@ -284,6 +297,7 @@ define @intrinsic_vssub_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vssub.nxv64i8.nxv64i8( + undef, %0, %1, i64 %2) @@ -320,6 +334,7 @@ entry: declare @llvm.riscv.vssub.nxv1i16.nxv1i16( , , + , i64); define @intrinsic_vssub_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { @@ -330,6 +345,7 @@ define @intrinsic_vssub_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vssub.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -365,6 +381,7 @@ entry: declare @llvm.riscv.vssub.nxv2i16.nxv2i16( , , + , i64); define @intrinsic_vssub_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { @@ -375,6 +392,7 @@ define @intrinsic_vssub_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vssub.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -410,6 +428,7 @@ entry: declare @llvm.riscv.vssub.nxv4i16.nxv4i16( , , + , i64); define @intrinsic_vssub_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { @@ -420,6 +439,7 @@ define @intrinsic_vssub_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vssub.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -455,6 +475,7 @@ entry: declare @llvm.riscv.vssub.nxv8i16.nxv8i16( , , + , i64); define @intrinsic_vssub_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { @@ -465,6 +486,7 @@ define @intrinsic_vssub_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vssub.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vssub.nxv16i16.nxv16i16( , , + , i64); define @intrinsic_vssub_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { @@ -510,6 +533,7 @@ define @intrinsic_vssub_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vssub.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vssub.nxv32i16.nxv32i16( , , + , i64); define @intrinsic_vssub_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { @@ -555,6 +580,7 @@ define @intrinsic_vssub_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vssub.nxv32i16.nxv32i16( + undef, %0, %1, i64 %2) @@ -591,6 +617,7 @@ entry: declare @llvm.riscv.vssub.nxv1i32.nxv1i32( , , + , i64); define @intrinsic_vssub_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { @@ -601,6 +628,7 @@ define @intrinsic_vssub_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vssub.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -636,6 +664,7 @@ entry: declare @llvm.riscv.vssub.nxv2i32.nxv2i32( , , + , i64); define @intrinsic_vssub_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { @@ -646,6 +675,7 @@ define @intrinsic_vssub_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vssub.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vssub.nxv4i32.nxv4i32( , , + , i64); define @intrinsic_vssub_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { @@ -691,6 +722,7 @@ define @intrinsic_vssub_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vssub.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vssub.nxv8i32.nxv8i32( , , + , i64); define @intrinsic_vssub_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { @@ -736,6 +769,7 @@ 
define @intrinsic_vssub_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vssub.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vssub.nxv16i32.nxv16i32( , , + , i64); define @intrinsic_vssub_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { @@ -781,6 +816,7 @@ define @intrinsic_vssub_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vssub.nxv16i32.nxv16i32( + undef, %0, %1, i64 %2) @@ -817,6 +853,7 @@ entry: declare @llvm.riscv.vssub.nxv1i64.nxv1i64( , , + , i64); define @intrinsic_vssub_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { @@ -827,6 +864,7 @@ define @intrinsic_vssub_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vssub.nxv1i64.nxv1i64( + undef, %0, %1, i64 %2) @@ -862,6 +900,7 @@ entry: declare @llvm.riscv.vssub.nxv2i64.nxv2i64( , , + , i64); define @intrinsic_vssub_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { @@ -872,6 +911,7 @@ define @intrinsic_vssub_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vssub.nxv2i64.nxv2i64( + undef, %0, %1, i64 %2) @@ -907,6 +947,7 @@ entry: declare @llvm.riscv.vssub.nxv4i64.nxv4i64( , , + , i64); define @intrinsic_vssub_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { @@ -917,6 +958,7 @@ define @intrinsic_vssub_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vssub.nxv4i64.nxv4i64( + undef, %0, %1, i64 %2) @@ -952,6 +994,7 @@ entry: declare @llvm.riscv.vssub.nxv8i64.nxv8i64( , , + , i64); define @intrinsic_vssub_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { @@ -962,6 +1005,7 @@ define @intrinsic_vssub_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vssub.nxv8i64.nxv8i64( + undef, %0, %1, i64 %2) @@ -997,6 +1041,7 @@ entry: declare @llvm.riscv.vssub.nxv1i8.i8( , + , i8, i64); @@ -1008,6 +1053,7 @@ define @intrinsic_vssub_vx_nxv1i8_nxv1i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1042,6 +1088,7 @@ entry: declare @llvm.riscv.vssub.nxv2i8.i8( , + , i8, i64); @@ -1053,6 +1100,7 @@ define @intrinsic_vssub_vx_nxv2i8_nxv2i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1087,6 +1135,7 @@ entry: declare @llvm.riscv.vssub.nxv4i8.i8( , + , i8, i64); @@ -1098,6 +1147,7 @@ define @intrinsic_vssub_vx_nxv4i8_nxv4i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1132,6 +1182,7 @@ entry: declare @llvm.riscv.vssub.nxv8i8.i8( , + , i8, i64); @@ -1143,6 +1194,7 @@ define @intrinsic_vssub_vx_nxv8i8_nxv8i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1177,6 +1229,7 @@ entry: declare @llvm.riscv.vssub.nxv16i8.i8( , + , i8, i64); @@ -1188,6 +1241,7 @@ define @intrinsic_vssub_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vssub.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1222,6 +1276,7 @@ entry: declare @llvm.riscv.vssub.nxv32i8.i8( , + , i8, i64); @@ -1233,6 +1288,7 @@ define @intrinsic_vssub_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vssub.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1267,6 +1323,7 @@ entry: declare @llvm.riscv.vssub.nxv64i8.i8( , + , i8, i64); @@ -1278,6 +1335,7 @@ define @intrinsic_vssub_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vssub.nxv64i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1312,6 +1370,7 @@ entry: declare @llvm.riscv.vssub.nxv1i16.i16( , + , i16, i64); @@ -1323,6 +1382,7 @@ define @intrinsic_vssub_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vssub.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1357,6 +1417,7 @@ entry: declare @llvm.riscv.vssub.nxv2i16.i16( , + , i16, i64); @@ -1368,6 +1429,7 @@ define 
@intrinsic_vssub_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vssub.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1402,6 +1464,7 @@ entry: declare @llvm.riscv.vssub.nxv4i16.i16( , + , i16, i64); @@ -1413,6 +1476,7 @@ define @intrinsic_vssub_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vssub.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1447,6 +1511,7 @@ entry: declare @llvm.riscv.vssub.nxv8i16.i16( , + , i16, i64); @@ -1458,6 +1523,7 @@ define @intrinsic_vssub_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vssub.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1492,6 +1558,7 @@ entry: declare @llvm.riscv.vssub.nxv16i16.i16( , + , i16, i64); @@ -1503,6 +1570,7 @@ define @intrinsic_vssub_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vssub.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1537,6 +1605,7 @@ entry: declare @llvm.riscv.vssub.nxv32i16.i16( , + , i16, i64); @@ -1548,6 +1617,7 @@ define @intrinsic_vssub_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vssub.nxv32i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1582,6 +1652,7 @@ entry: declare @llvm.riscv.vssub.nxv1i32.i32( , + , i32, i64); @@ -1593,6 +1664,7 @@ define @intrinsic_vssub_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vssub.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1627,6 +1699,7 @@ entry: declare @llvm.riscv.vssub.nxv2i32.i32( , + , i32, i64); @@ -1638,6 +1711,7 @@ define @intrinsic_vssub_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vssub.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1672,6 +1746,7 @@ entry: declare @llvm.riscv.vssub.nxv4i32.i32( , + , i32, i64); @@ -1683,6 +1758,7 @@ define @intrinsic_vssub_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vssub.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1717,6 +1793,7 @@ entry: declare @llvm.riscv.vssub.nxv8i32.i32( , + , i32, i64); @@ -1728,6 +1805,7 @@ define @intrinsic_vssub_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vssub.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1762,6 +1840,7 @@ entry: declare @llvm.riscv.vssub.nxv16i32.i32( , + , i32, i64); @@ -1773,6 +1852,7 @@ define @intrinsic_vssub_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vssub.nxv16i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1807,6 +1887,7 @@ entry: declare @llvm.riscv.vssub.nxv1i64.i64( , + , i64, i64); @@ -1818,6 +1899,7 @@ define @intrinsic_vssub_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vssub.nxv1i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1852,6 +1934,7 @@ entry: declare @llvm.riscv.vssub.nxv2i64.i64( , + , i64, i64); @@ -1863,6 +1946,7 @@ define @intrinsic_vssub_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vssub.nxv2i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1897,6 +1981,7 @@ entry: declare @llvm.riscv.vssub.nxv4i64.i64( , + , i64, i64); @@ -1908,6 +1993,7 @@ define @intrinsic_vssub_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vssub.nxv4i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1942,6 +2028,7 @@ entry: declare @llvm.riscv.vssub.nxv8i64.i64( , + , i64, i64); @@ -1953,6 +2040,7 @@ define @intrinsic_vssub_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vssub.nxv8i64.i64( + undef, %0, i64 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll index 2b0fcf5..6eab93f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll @@ -4,6 +4,7 @@ declare @llvm.riscv.vssubu.nxv1i8.nxv1i8( , , + , i32); define @intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { @@ -14,6 +15,7 @@ define @intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vssubu.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -49,6 +51,7 @@ entry: declare @llvm.riscv.vssubu.nxv2i8.nxv2i8( , , + , i32); define @intrinsic_vssubu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, 
i32 %2) nounwind { @@ -59,6 +62,7 @@ define @intrinsic_vssubu_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vssubu.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -94,6 +98,7 @@ entry: declare @llvm.riscv.vssubu.nxv4i8.nxv4i8( , , + , i32); define @intrinsic_vssubu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { @@ -104,6 +109,7 @@ define @intrinsic_vssubu_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vssubu.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -139,6 +145,7 @@ entry: declare @llvm.riscv.vssubu.nxv8i8.nxv8i8( , , + , i32); define @intrinsic_vssubu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { @@ -149,6 +156,7 @@ define @intrinsic_vssubu_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vssubu.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -184,6 +192,7 @@ entry: declare @llvm.riscv.vssubu.nxv16i8.nxv16i8( , , + , i32); define @intrinsic_vssubu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { @@ -194,6 +203,7 @@ define @intrinsic_vssubu_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vssubu.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -229,6 +239,7 @@ entry: declare @llvm.riscv.vssubu.nxv32i8.nxv32i8( , , + , i32); define @intrinsic_vssubu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { @@ -239,6 +250,7 @@ define @intrinsic_vssubu_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vssubu.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -274,6 +286,7 @@ entry: declare @llvm.riscv.vssubu.nxv64i8.nxv64i8( , , + , i32); define @intrinsic_vssubu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { @@ -284,6 +297,7 @@ define @intrinsic_vssubu_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vssubu.nxv64i8.nxv64i8( + undef, %0, %1, i32 %2) @@ -320,6 +334,7 @@ entry: declare @llvm.riscv.vssubu.nxv1i16.nxv1i16( , , + , i32); define @intrinsic_vssubu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { @@ -330,6 +345,7 @@ define @intrinsic_vssubu_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vssubu.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -365,6 +381,7 @@ entry: declare @llvm.riscv.vssubu.nxv2i16.nxv2i16( , , + , i32); define @intrinsic_vssubu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { @@ -375,6 +392,7 @@ define @intrinsic_vssubu_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vssubu.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -410,6 +428,7 @@ entry: declare @llvm.riscv.vssubu.nxv4i16.nxv4i16( , , + , i32); define @intrinsic_vssubu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { @@ -420,6 +439,7 @@ define @intrinsic_vssubu_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vssubu.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -455,6 +475,7 @@ entry: declare @llvm.riscv.vssubu.nxv8i16.nxv8i16( , , + , i32); define @intrinsic_vssubu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { @@ -465,6 +486,7 @@ define @intrinsic_vssubu_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vssubu.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vssubu.nxv16i16.nxv16i16( , , + , i32); define @intrinsic_vssubu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { @@ -510,6 +533,7 @@ define @intrinsic_vssubu_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vssubu.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vssubu.nxv32i16.nxv32i16( , , + , i32); define @intrinsic_vssubu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { @@ -555,6 +580,7 @@ define @intrinsic_vssubu_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vssubu.nxv32i16.nxv32i16( + undef, %0, %1, i32 %2) @@ -591,6 +617,7 @@ entry: declare @llvm.riscv.vssubu.nxv1i32.nxv1i32( , , + , i32); define 
@intrinsic_vssubu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { @@ -601,6 +628,7 @@ define @intrinsic_vssubu_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vssubu.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -636,6 +664,7 @@ entry: declare @llvm.riscv.vssubu.nxv2i32.nxv2i32( , , + , i32); define @intrinsic_vssubu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { @@ -646,6 +675,7 @@ define @intrinsic_vssubu_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vssubu.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vssubu.nxv4i32.nxv4i32( , , + , i32); define @intrinsic_vssubu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { @@ -691,6 +722,7 @@ define @intrinsic_vssubu_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vssubu.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vssubu.nxv8i32.nxv8i32( , , + , i32); define @intrinsic_vssubu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { @@ -736,6 +769,7 @@ define @intrinsic_vssubu_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vssubu.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vssubu.nxv16i32.nxv16i32( , , + , i32); define @intrinsic_vssubu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { @@ -781,6 +816,7 @@ define @intrinsic_vssubu_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vssubu.nxv16i32.nxv16i32( + undef, %0, %1, i32 %2) @@ -817,6 +853,7 @@ entry: declare @llvm.riscv.vssubu.nxv1i64.nxv1i64( , , + , i32); define @intrinsic_vssubu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { @@ -827,6 +864,7 @@ define @intrinsic_vssubu_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vssubu.nxv1i64.nxv1i64( + undef, %0, %1, i32 %2) @@ -862,6 +900,7 @@ entry: declare @llvm.riscv.vssubu.nxv2i64.nxv2i64( , , + , i32); define @intrinsic_vssubu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { @@ -872,6 +911,7 @@ define @intrinsic_vssubu_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vssubu.nxv2i64.nxv2i64( + undef, %0, %1, i32 %2) @@ -907,6 +947,7 @@ entry: declare @llvm.riscv.vssubu.nxv4i64.nxv4i64( , , + , i32); define @intrinsic_vssubu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { @@ -917,6 +958,7 @@ define @intrinsic_vssubu_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vssubu.nxv4i64.nxv4i64( + undef, %0, %1, i32 %2) @@ -952,6 +994,7 @@ entry: declare @llvm.riscv.vssubu.nxv8i64.nxv8i64( , , + , i32); define @intrinsic_vssubu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { @@ -962,6 +1005,7 @@ define @intrinsic_vssubu_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vssubu.nxv8i64.nxv8i64( + undef, %0, %1, i32 %2) @@ -997,6 +1041,7 @@ entry: declare @llvm.riscv.vssubu.nxv1i8.i8( , + , i8, i32); @@ -1008,6 +1053,7 @@ define @intrinsic_vssubu_vx_nxv1i8_nxv1i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1042,6 +1088,7 @@ entry: declare @llvm.riscv.vssubu.nxv2i8.i8( , + , i8, i32); @@ -1053,6 +1100,7 @@ define @intrinsic_vssubu_vx_nxv2i8_nxv2i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1087,6 +1135,7 @@ entry: declare @llvm.riscv.vssubu.nxv4i8.i8( , + , i8, i32); @@ -1098,6 +1147,7 @@ define @intrinsic_vssubu_vx_nxv4i8_nxv4i8_i8( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1132,6 +1182,7 @@ entry: declare @llvm.riscv.vssubu.nxv8i8.i8( , + , i8, i32); @@ -1143,6 +1194,7 @@ define @intrinsic_vssubu_vx_nxv8i8_nxv8i8_i8( ; CHECK-NEXT: ret entry: %a = call 
<vscale x 8 x i8> @llvm.riscv.vssubu.nxv8i8.i8(
+    <vscale x 8 x i8> undef,
     <vscale x 8 x i8> %0,
     i8 %1,
     i32 %2)
[the remaining vssubu-rv32.ll hunks repeat this two-line change — one added <vscale x N x iM> parameter in each unmasked declare, one added "undef" first argument at each call — for the .vx variants nxv16i8 through nxv8i64]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll
index 0169d78..2f05441 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll
@@ -4,6 +4,7 @@
 declare <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
   i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8(
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i64 %2)
[the same change is repeated for every other vssubu-rv64.ll .vv and .vx variant, nxv2i8 through nxv8i64, with an i64 VL operand]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll
index a9896a3..4f0bff3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll
@@ -4,6 +4,7 @@
 declare <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
   i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vsub_vv_nxv1i8_nxv1i8_nxv1i8(
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 %2)
[the same two-line change is repeated for every other vsub-rv32.ll .vv and .vx variant, nxv2i8 through nxv8i64]
@@ -2041,6 +2129,7 @@ define <vscale x 1 x i8> @intrinsic_vsub_vi_nxv1i8_nxv1i8_i8(
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i8 9,
     i32 %1)
[the .vi hunks for nxv2i8 through nxv8i64 are updated the same way; the nxv64i8 case uses the immediate -9]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll
index 4840fa6..2cb7f35 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll
@@ -4,6 +4,7 @@
 declare <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
   i64);
[every vsub-rv64.ll .vv, .vx and .vi variant (nxv1i8 through nxv8i64, i64 VL) receives the same added declare parameter and "undef" call argument]
@llvm.riscv.vsub.nxv2i16.i16( + undef, %0, i16 9, i64 %1) @@ -2281,6 +2378,7 @@ define @intrinsic_vsub_vi_nxv4i16_nxv4i16_i16( @llvm.riscv.vsub.nxv4i16.i16( + undef, %0, i16 9, i64 %1) @@ -2313,6 +2411,7 @@ define @intrinsic_vsub_vi_nxv8i16_nxv8i16_i16( @llvm.riscv.vsub.nxv8i16.i16( + undef, %0, i16 9, i64 %1) @@ -2345,6 +2444,7 @@ define @intrinsic_vsub_vi_nxv16i16_nxv16i16_i16( @llvm.riscv.vsub.nxv16i16.i16( + undef, %0, i16 9, i64 %1) @@ -2377,6 +2477,7 @@ define @intrinsic_vsub_vi_nxv32i16_nxv32i16_i16( @llvm.riscv.vsub.nxv32i16.i16( + undef, %0, i16 9, i64 %1) @@ -2409,6 +2510,7 @@ define @intrinsic_vsub_vi_nxv1i32_nxv1i32_i32( @llvm.riscv.vsub.nxv1i32.i32( + undef, %0, i32 9, i64 %1) @@ -2441,6 +2543,7 @@ define @intrinsic_vsub_vi_nxv2i32_nxv2i32_i32( @llvm.riscv.vsub.nxv2i32.i32( + undef, %0, i32 9, i64 %1) @@ -2473,6 +2576,7 @@ define @intrinsic_vsub_vi_nxv4i32_nxv4i32_i32( @llvm.riscv.vsub.nxv4i32.i32( + undef, %0, i32 9, i64 %1) @@ -2505,6 +2609,7 @@ define @intrinsic_vsub_vi_nxv8i32_nxv8i32_i32( @llvm.riscv.vsub.nxv8i32.i32( + undef, %0, i32 9, i64 %1) @@ -2537,6 +2642,7 @@ define @intrinsic_vsub_vi_nxv16i32_nxv16i32_i32( @llvm.riscv.vsub.nxv16i32.i32( + undef, %0, i32 9, i64 %1) @@ -2569,6 +2675,7 @@ define @intrinsic_vsub_vi_nxv1i64_nxv1i64_i64( @llvm.riscv.vsub.nxv1i64.i64( + undef, %0, i64 9, i64 %1) @@ -2601,6 +2708,7 @@ define @intrinsic_vsub_vi_nxv2i64_nxv2i64_i64( @llvm.riscv.vsub.nxv2i64.i64( + undef, %0, i64 9, i64 %1) @@ -2633,6 +2741,7 @@ define @intrinsic_vsub_vi_nxv4i64_nxv4i64_i64( @llvm.riscv.vsub.nxv4i64.i64( + undef, %0, i64 9, i64 %1) @@ -2665,6 +2774,7 @@ define @intrinsic_vsub_vi_nxv8i64_nxv8i64_i64( @llvm.riscv.vsub.nxv8i64.i64( + undef, %0, i64 9, i64 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd-rv32.ll index 72f5599..4c2c1a9 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwadd-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwadd-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8( + , , , i32); @@ -15,6 +16,7 @@ define @intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8( @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -48,6 +50,7 @@ entry: } declare @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8( + , , , i32); @@ -61,6 +64,7 @@ define @intrinsic_vwadd_vv_nxv2i16_nxv2i8_nxv2i8( @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -94,6 +98,7 @@ entry: } declare @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8( + , , , i32); @@ -107,6 +112,7 @@ define @intrinsic_vwadd_vv_nxv4i16_nxv4i8_nxv4i8( @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -140,6 +146,7 @@ entry: } declare @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8( + , , , i32); @@ -153,6 +160,7 @@ define @intrinsic_vwadd_vv_nxv8i16_nxv8i8_nxv8i8( @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -186,6 +194,7 @@ entry: } declare @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8( + , , , i32); @@ -199,6 +208,7 @@ define @intrinsic_vwadd_vv_nxv16i16_nxv16i8_nxv16i8( @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -232,6 +242,7 @@ entry: } declare @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8( + , , , i32); @@ -245,6 +256,7 @@ define @intrinsic_vwadd_vv_nxv32i16_nxv32i8_nxv32i8( @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -278,6 +290,7 @@ entry: } declare @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16( + , , , i32); @@ -291,6 +304,7 @@ define 
@intrinsic_vwadd_vv_nxv1i32_nxv1i16_nxv1i16( @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -324,6 +338,7 @@ entry: } declare @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16( + , , , i32); @@ -337,6 +352,7 @@ define @intrinsic_vwadd_vv_nxv2i32_nxv2i16_nxv2i16( @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -370,6 +386,7 @@ entry: } declare @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16( + , , , i32); @@ -383,6 +400,7 @@ define @intrinsic_vwadd_vv_nxv4i32_nxv4i16_nxv4i16( @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -416,6 +434,7 @@ entry: } declare @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16( + , , , i32); @@ -429,6 +448,7 @@ define @intrinsic_vwadd_vv_nxv8i32_nxv8i16_nxv8i16( @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -462,6 +482,7 @@ entry: } declare @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16( + , , , i32); @@ -475,6 +496,7 @@ define @intrinsic_vwadd_vv_nxv16i32_nxv16i16_nxv16i16( @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -508,6 +530,7 @@ entry: } declare @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32( + , , , i32); @@ -521,6 +544,7 @@ define @intrinsic_vwadd_vv_nxv1i64_nxv1i32_nxv1i32( @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -554,6 +578,7 @@ entry: } declare @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32( + , , , i32); @@ -567,6 +592,7 @@ define @intrinsic_vwadd_vv_nxv2i64_nxv2i32_nxv2i32( @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -600,6 +626,7 @@ entry: } declare @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32( + , , , i32); @@ -613,6 +640,7 @@ define @intrinsic_vwadd_vv_nxv4i64_nxv4i32_nxv4i32( @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -646,6 +674,7 @@ entry: } declare @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32( + , , , i32); @@ -659,6 +688,7 @@ define @intrinsic_vwadd_vv_nxv8i64_nxv8i32_nxv8i32( @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -692,6 +722,7 @@ entry: } declare @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8( + , , i8, i32); @@ -705,6 +736,7 @@ define @intrinsic_vwadd_vx_nxv1i16_nxv1i8_i8( @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -738,6 +770,7 @@ entry: } declare @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8( + , , i8, i32); @@ -751,6 +784,7 @@ define @intrinsic_vwadd_vx_nxv2i16_nxv2i8_i8( @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -784,6 +818,7 @@ entry: } declare @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8( + , , i8, i32); @@ -797,6 +832,7 @@ define @intrinsic_vwadd_vx_nxv4i16_nxv4i8_i8( @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -830,6 +866,7 @@ entry: } declare @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8( + , , i8, i32); @@ -843,6 +880,7 @@ define @intrinsic_vwadd_vx_nxv8i16_nxv8i8_i8( @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -876,6 +914,7 @@ entry: } declare @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8( + , , i8, i32); @@ -889,6 +928,7 @@ define @intrinsic_vwadd_vx_nxv16i16_nxv16i8_i8( @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -922,6 +962,7 @@ entry: } declare @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8( + , , i8, i32); @@ -935,6 +976,7 @@ define @intrinsic_vwadd_vx_nxv32i16_nxv32i8_i8( @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -968,6 +1010,7 @@ entry: } declare @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16( + , , i16, i32); @@ -981,6 +1024,7 @@ define @intrinsic_vwadd_vx_nxv1i32_nxv1i16_i16( @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16( + undef, %0, i16 %1, 
i32 %2) @@ -1014,6 +1058,7 @@ entry: } declare @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16( + , , i16, i32); @@ -1027,6 +1072,7 @@ define @intrinsic_vwadd_vx_nxv2i32_nxv2i16_i16( @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1060,6 +1106,7 @@ entry: } declare @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16( + , , i16, i32); @@ -1073,6 +1120,7 @@ define @intrinsic_vwadd_vx_nxv4i32_nxv4i16_i16( @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1106,6 +1154,7 @@ entry: } declare @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16( + , , i16, i32); @@ -1119,6 +1168,7 @@ define @intrinsic_vwadd_vx_nxv8i32_nxv8i16_i16( @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1152,6 +1202,7 @@ entry: } declare @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16( + , , i16, i32); @@ -1165,6 +1216,7 @@ define @intrinsic_vwadd_vx_nxv16i32_nxv16i16_i16( @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1198,6 +1250,7 @@ entry: } declare @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32( + , , i32, i32); @@ -1211,6 +1264,7 @@ define @intrinsic_vwadd_vx_nxv1i64_nxv1i32_i32( @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1244,6 +1298,7 @@ entry: } declare @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32( + , , i32, i32); @@ -1257,6 +1312,7 @@ define @intrinsic_vwadd_vx_nxv2i64_nxv2i32_i32( @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1290,6 +1346,7 @@ entry: } declare @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32( + , , i32, i32); @@ -1303,6 +1360,7 @@ define @intrinsic_vwadd_vx_nxv4i64_nxv4i32_i32( @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1336,6 +1394,7 @@ entry: } declare @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32( + , , i32, i32); @@ -1349,6 +1408,7 @@ define @intrinsic_vwadd_vx_nxv8i64_nxv8i32_i32( @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd-rv64.ll index 348d2b3..722349f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwadd-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwadd-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8( + , , , i64); @@ -15,6 +16,7 @@ define @intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8( @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -48,6 +50,7 @@ entry: } declare @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8( + , , , i64); @@ -61,6 +64,7 @@ define @intrinsic_vwadd_vv_nxv2i16_nxv2i8_nxv2i8( @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -94,6 +98,7 @@ entry: } declare @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8( + , , , i64); @@ -107,6 +112,7 @@ define @intrinsic_vwadd_vv_nxv4i16_nxv4i8_nxv4i8( @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -140,6 +146,7 @@ entry: } declare @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8( + , , , i64); @@ -153,6 +160,7 @@ define @intrinsic_vwadd_vv_nxv8i16_nxv8i8_nxv8i8( @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -186,6 +194,7 @@ entry: } declare @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8( + , , , i64); @@ -199,6 +208,7 @@ define @intrinsic_vwadd_vv_nxv16i16_nxv16i8_nxv16i8( @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -232,6 +242,7 @@ entry: } declare @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8( + , , , i64); @@ -245,6 +256,7 @@ define @intrinsic_vwadd_vv_nxv32i16_nxv32i8_nxv32i8( @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8( + undef, %0, %1, i64 
%2) @@ -278,6 +290,7 @@ entry: } declare @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16( + , , , i64); @@ -291,6 +304,7 @@ define @intrinsic_vwadd_vv_nxv1i32_nxv1i16_nxv1i16( @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -324,6 +338,7 @@ entry: } declare @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16( + , , , i64); @@ -337,6 +352,7 @@ define @intrinsic_vwadd_vv_nxv2i32_nxv2i16_nxv2i16( @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -370,6 +386,7 @@ entry: } declare @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16( + , , , i64); @@ -383,6 +400,7 @@ define @intrinsic_vwadd_vv_nxv4i32_nxv4i16_nxv4i16( @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -416,6 +434,7 @@ entry: } declare @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16( + , , , i64); @@ -429,6 +448,7 @@ define @intrinsic_vwadd_vv_nxv8i32_nxv8i16_nxv8i16( @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -462,6 +482,7 @@ entry: } declare @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16( + , , , i64); @@ -475,6 +496,7 @@ define @intrinsic_vwadd_vv_nxv16i32_nxv16i16_nxv16i16( @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -508,6 +530,7 @@ entry: } declare @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32( + , , , i64); @@ -521,6 +544,7 @@ define @intrinsic_vwadd_vv_nxv1i64_nxv1i32_nxv1i32( @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -554,6 +578,7 @@ entry: } declare @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32( + , , , i64); @@ -567,6 +592,7 @@ define @intrinsic_vwadd_vv_nxv2i64_nxv2i32_nxv2i32( @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -600,6 +626,7 @@ entry: } declare @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32( + , , , i64); @@ -613,6 +640,7 @@ define @intrinsic_vwadd_vv_nxv4i64_nxv4i32_nxv4i32( @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -646,6 +674,7 @@ entry: } declare @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32( + , , , i64); @@ -659,6 +688,7 @@ define @intrinsic_vwadd_vv_nxv8i64_nxv8i32_nxv8i32( @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -692,6 +722,7 @@ entry: } declare @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8( + , , i8, i64); @@ -705,6 +736,7 @@ define @intrinsic_vwadd_vx_nxv1i16_nxv1i8_i8( @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -738,6 +770,7 @@ entry: } declare @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8( + , , i8, i64); @@ -751,6 +784,7 @@ define @intrinsic_vwadd_vx_nxv2i16_nxv2i8_i8( @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -784,6 +818,7 @@ entry: } declare @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8( + , , i8, i64); @@ -797,6 +832,7 @@ define @intrinsic_vwadd_vx_nxv4i16_nxv4i8_i8( @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -830,6 +866,7 @@ entry: } declare @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8( + , , i8, i64); @@ -843,6 +880,7 @@ define @intrinsic_vwadd_vx_nxv8i16_nxv8i8_i8( @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -876,6 +914,7 @@ entry: } declare @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8( + , , i8, i64); @@ -889,6 +928,7 @@ define @intrinsic_vwadd_vx_nxv16i16_nxv16i8_i8( @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -922,6 +962,7 @@ entry: } declare @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8( + , , i8, i64); @@ -935,6 +976,7 @@ define @intrinsic_vwadd_vx_nxv32i16_nxv32i8_i8( @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -968,6 +1010,7 @@ entry: } declare @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16( + , , i16, i64); @@ 
-981,6 +1024,7 @@ define @intrinsic_vwadd_vx_nxv1i32_nxv1i16_i16( @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1014,6 +1058,7 @@ entry: } declare @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16( + , , i16, i64); @@ -1027,6 +1072,7 @@ define @intrinsic_vwadd_vx_nxv2i32_nxv2i16_i16( @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1060,6 +1106,7 @@ entry: } declare @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16( + , , i16, i64); @@ -1073,6 +1120,7 @@ define @intrinsic_vwadd_vx_nxv4i32_nxv4i16_i16( @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1106,6 +1154,7 @@ entry: } declare @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16( + , , i16, i64); @@ -1119,6 +1168,7 @@ define @intrinsic_vwadd_vx_nxv8i32_nxv8i16_i16( @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1152,6 +1202,7 @@ entry: } declare @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16( + , , i16, i64); @@ -1165,6 +1216,7 @@ define @intrinsic_vwadd_vx_nxv16i32_nxv16i16_i16( @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1198,6 +1250,7 @@ entry: } declare @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32( + , , i32, i64); @@ -1211,6 +1264,7 @@ define @intrinsic_vwadd_vx_nxv1i64_nxv1i32_i32( @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1244,6 +1298,7 @@ entry: } declare @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32( + , , i32, i64); @@ -1257,6 +1312,7 @@ define @intrinsic_vwadd_vx_nxv2i64_nxv2i32_i32( @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1290,6 +1346,7 @@ entry: } declare @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32( + , , i32, i64); @@ -1303,6 +1360,7 @@ define @intrinsic_vwadd_vx_nxv4i64_nxv4i32_i32( @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1336,6 +1394,7 @@ entry: } declare @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32( + , , i32, i64); @@ -1349,6 +1408,7 @@ define @intrinsic_vwadd_vx_nxv8i64_nxv8i32_i32( @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll index 6fab280..b11f800 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll @@ -3,6 +3,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs -early-live-intervals < %s | FileCheck %s declare @llvm.riscv.vwadd.w.nxv1i16.nxv1i8( , + , , i32); @@ -14,6 +15,7 @@ define @intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8( @llvm.riscv.vwadd.w.nxv1i16.nxv1i8( + undef, %0, %1, i32 %2) @@ -48,6 +50,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv2i16.nxv2i8( , + , , i32); @@ -59,6 +62,7 @@ define @intrinsic_vwadd.w_wv_nxv2i16_nxv2i16_nxv2i8( @llvm.riscv.vwadd.w.nxv2i16.nxv2i8( + undef, %0, %1, i32 %2) @@ -93,6 +97,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv4i16.nxv4i8( , + , , i32); @@ -104,6 +109,7 @@ define @intrinsic_vwadd.w_wv_nxv4i16_nxv4i16_nxv4i8( @llvm.riscv.vwadd.w.nxv4i16.nxv4i8( + undef, %0, %1, i32 %2) @@ -138,6 +144,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv8i16.nxv8i8( , + , , i32); @@ -149,6 +156,7 @@ define @intrinsic_vwadd.w_wv_nxv8i16_nxv8i16_nxv8i8( @llvm.riscv.vwadd.w.nxv8i16.nxv8i8( + undef, %0, %1, i32 %2) @@ -183,6 +191,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv16i16.nxv16i8( , + , , i32); @@ -194,6 +203,7 @@ define @intrinsic_vwadd.w_wv_nxv16i16_nxv16i16_nxv16i8( @llvm.riscv.vwadd.w.nxv16i16.nxv16i8( + undef, %0, %1, i32 %2) @@ -228,6 +238,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv32i16.nxv32i8( , + , , i32); @@ -239,6 +250,7 @@ define 
@intrinsic_vwadd.w_wv_nxv32i16_nxv32i16_nxv32i8( @llvm.riscv.vwadd.w.nxv32i16.nxv32i8( + undef, %0, %1, i32 %2) @@ -274,6 +286,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv1i32.nxv1i16( , + , , i32); @@ -285,6 +298,7 @@ define @intrinsic_vwadd.w_wv_nxv1i32_nxv1i32_nxv1i16( @llvm.riscv.vwadd.w.nxv1i32.nxv1i16( + undef, %0, %1, i32 %2) @@ -319,6 +333,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv2i32.nxv2i16( , + , , i32); @@ -330,6 +345,7 @@ define @intrinsic_vwadd.w_wv_nxv2i32_nxv2i32_nxv2i16( @llvm.riscv.vwadd.w.nxv2i32.nxv2i16( + undef, %0, %1, i32 %2) @@ -364,6 +380,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv4i32.nxv4i16( , + , , i32); @@ -375,6 +392,7 @@ define @intrinsic_vwadd.w_wv_nxv4i32_nxv4i32_nxv4i16( @llvm.riscv.vwadd.w.nxv4i32.nxv4i16( + undef, %0, %1, i32 %2) @@ -409,6 +427,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv8i32.nxv8i16( , + , , i32); @@ -420,6 +439,7 @@ define @intrinsic_vwadd.w_wv_nxv8i32_nxv8i32_nxv8i16( @llvm.riscv.vwadd.w.nxv8i32.nxv8i16( + undef, %0, %1, i32 %2) @@ -454,6 +474,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv16i32.nxv16i16( , + , , i32); @@ -465,6 +486,7 @@ define @intrinsic_vwadd.w_wv_nxv16i32_nxv16i32_nxv16i16( @llvm.riscv.vwadd.w.nxv16i32.nxv16i16( + undef, %0, %1, i32 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv1i64.nxv1i32( , + , , i32); @@ -511,6 +534,7 @@ define @intrinsic_vwadd.w_wv_nxv1i64_nxv1i64_nxv1i32( @llvm.riscv.vwadd.w.nxv1i64.nxv1i32( + undef, %0, %1, i32 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv2i64.nxv2i32( , + , , i32); @@ -556,6 +581,7 @@ define @intrinsic_vwadd.w_wv_nxv2i64_nxv2i64_nxv2i32( @llvm.riscv.vwadd.w.nxv2i64.nxv2i32( + undef, %0, %1, i32 %2) @@ -590,6 +616,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv4i64.nxv4i32( , + , , i32); @@ -601,6 +628,7 @@ define @intrinsic_vwadd.w_wv_nxv4i64_nxv4i64_nxv4i32( @llvm.riscv.vwadd.w.nxv4i64.nxv4i32( + undef, %0, %1, i32 %2) @@ -635,6 +663,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv8i64.nxv8i32( , + , , i32); @@ -646,6 +675,7 @@ define @intrinsic_vwadd.w_wv_nxv8i64_nxv8i64_nxv8i32( @llvm.riscv.vwadd.w.nxv8i64.nxv8i32( + undef, %0, %1, i32 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv1i16.i8( , + , i8, i32); @@ -692,6 +723,7 @@ define @intrinsic_vwadd.w_wx_nxv1i16_nxv1i16_i8( @llvm.riscv.vwadd.w.nxv1i16.i8( + undef, %0, i8 %1, i32 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv2i16.i8( , + , i8, i32); @@ -737,6 +770,7 @@ define @intrinsic_vwadd.w_wx_nxv2i16_nxv2i16_i8( @llvm.riscv.vwadd.w.nxv2i16.i8( + undef, %0, i8 %1, i32 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv4i16.i8( , + , i8, i32); @@ -782,6 +817,7 @@ define @intrinsic_vwadd.w_wx_nxv4i16_nxv4i16_i8( @llvm.riscv.vwadd.w.nxv4i16.i8( + undef, %0, i8 %1, i32 %2) @@ -816,6 +852,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv8i16.i8( , + , i8, i32); @@ -827,6 +864,7 @@ define @intrinsic_vwadd.w_wx_nxv8i16_nxv8i16_i8( @llvm.riscv.vwadd.w.nxv8i16.i8( + undef, %0, i8 %1, i32 %2) @@ -861,6 +899,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv16i16.i8( , + , i8, i32); @@ -872,6 +911,7 @@ define @intrinsic_vwadd.w_wx_nxv16i16_nxv16i16_i8( @llvm.riscv.vwadd.w.nxv16i16.i8( + undef, %0, i8 %1, i32 %2) @@ -906,6 +946,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv32i16.i8( , + , i8, i32); @@ -917,6 +958,7 @@ define @intrinsic_vwadd.w_wx_nxv32i16_nxv32i16_i8( @llvm.riscv.vwadd.w.nxv32i16.i8( + undef, %0, i8 %1, i32 %2) @@ -951,6 +993,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv1i32.i16( , + , i16, i32); @@ -962,6 +1005,7 @@ define @intrinsic_vwadd.w_wx_nxv1i32_nxv1i32_i16( 
@llvm.riscv.vwadd.w.nxv1i32.i16( + undef, %0, i16 %1, i32 %2) @@ -996,6 +1040,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv2i32.i16( , + , i16, i32); @@ -1007,6 +1052,7 @@ define @intrinsic_vwadd.w_wx_nxv2i32_nxv2i32_i16( @llvm.riscv.vwadd.w.nxv2i32.i16( + undef, %0, i16 %1, i32 %2) @@ -1041,6 +1087,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv4i32.i16( , + , i16, i32); @@ -1052,6 +1099,7 @@ define @intrinsic_vwadd.w_wx_nxv4i32_nxv4i32_i16( @llvm.riscv.vwadd.w.nxv4i32.i16( + undef, %0, i16 %1, i32 %2) @@ -1086,6 +1134,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv8i32.i16( , + , i16, i32); @@ -1097,6 +1146,7 @@ define @intrinsic_vwadd.w_wx_nxv8i32_nxv8i32_i16( @llvm.riscv.vwadd.w.nxv8i32.i16( + undef, %0, i16 %1, i32 %2) @@ -1131,6 +1181,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv16i32.i16( , + , i16, i32); @@ -1142,6 +1193,7 @@ define @intrinsic_vwadd.w_wx_nxv16i32_nxv16i32_i16( @llvm.riscv.vwadd.w.nxv16i32.i16( + undef, %0, i16 %1, i32 %2) @@ -1176,6 +1228,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv1i64.i32( , + , i32, i32); @@ -1187,6 +1240,7 @@ define @intrinsic_vwadd.w_wx_nxv1i64_nxv1i64_i32( @llvm.riscv.vwadd.w.nxv1i64.i32( + undef, %0, i32 %1, i32 %2) @@ -1221,6 +1275,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv2i64.i32( , + , i32, i32); @@ -1232,6 +1287,7 @@ define @intrinsic_vwadd.w_wx_nxv2i64_nxv2i64_i32( @llvm.riscv.vwadd.w.nxv2i64.i32( + undef, %0, i32 %1, i32 %2) @@ -1266,6 +1322,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv4i64.i32( , + , i32, i32); @@ -1277,6 +1334,7 @@ define @intrinsic_vwadd.w_wx_nxv4i64_nxv4i64_i32( @llvm.riscv.vwadd.w.nxv4i64.i32( + undef, %0, i32 %1, i32 %2) @@ -1311,6 +1369,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv8i64.i32( , + , i32, i32); @@ -1322,6 +1381,7 @@ define @intrinsic_vwadd.w_wx_nxv8i64_nxv8i64_i32( @llvm.riscv.vwadd.w.nxv8i64.i32( + undef, %0, i32 %1, i32 %2) @@ -1873,6 +1933,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv1i16_nxv1i16_nxv1i8( @llvm.riscv.vwadd.w.nxv1i16.nxv1i8( + undef, %1, %0, i32 %2) @@ -1889,6 +1950,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv2i16_nxv2i16_nxv2i8( @llvm.riscv.vwadd.w.nxv2i16.nxv2i8( + undef, %1, %0, i32 %2) @@ -1905,6 +1967,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv4i16_nxv4i16_nxv4i8( @llvm.riscv.vwadd.w.nxv4i16.nxv4i8( + undef, %1, %0, i32 %2) @@ -1921,6 +1984,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv8i16_nxv8i16_nxv8i8( @llvm.riscv.vwadd.w.nxv8i16.nxv8i8( + undef, %1, %0, i32 %2) @@ -1937,6 +2001,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv16i16_nxv16i16_nxv16i8 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.w.nxv16i16.nxv16i8( + undef, %1, %0, i32 %2) @@ -1953,6 +2018,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv32i16_nxv32i16_nxv32i8 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.w.nxv32i16.nxv32i8( + undef, %1, %0, i32 %2) @@ -1969,6 +2035,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv1i32_nxv1i32_nxv1i16( @llvm.riscv.vwadd.w.nxv1i32.nxv1i16( + undef, %1, %0, i32 %2) @@ -1985,6 +2052,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv2i32_nxv2i32_nxv2i16( @llvm.riscv.vwadd.w.nxv2i32.nxv2i16( + undef, %1, %0, i32 %2) @@ -2001,6 +2069,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv4i32_nxv4i32_nxv4i16( @llvm.riscv.vwadd.w.nxv4i32.nxv4i16( + undef, %1, %0, i32 %2) @@ -2017,6 +2086,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv8i32_nxv8i32_nxv8i16( @llvm.riscv.vwadd.w.nxv8i32.nxv8i16( + undef, %1, %0, i32 %2) @@ -2033,6 +2103,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv1i64_nxv1i64_nxv1i32( @llvm.riscv.vwadd.w.nxv1i64.nxv1i32( + undef, %1, %0, i32 %2) @@ -2049,6 +2120,7 @@ define 
@intrinsic_vwadd.w_wv_untie_nxv2i64_nxv2i64_nxv2i32( @llvm.riscv.vwadd.w.nxv2i64.nxv2i32( + undef, %1, %0, i32 %2) @@ -2065,6 +2137,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv4i64_nxv4i64_nxv4i32( @llvm.riscv.vwadd.w.nxv4i64.nxv4i32( + undef, %1, %0, i32 %2) @@ -2081,6 +2154,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv8i64_nxv8i64_nxv8i32( @llvm.riscv.vwadd.w.nxv8i64.nxv8i32( + undef, %1, %0, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll index 65481d3..6e753d8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll @@ -3,6 +3,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs -early-live-intervals < %s | FileCheck %s declare @llvm.riscv.vwadd.w.nxv1i16.nxv1i8( , + , , i64); @@ -14,6 +15,7 @@ define @intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8( @llvm.riscv.vwadd.w.nxv1i16.nxv1i8( + undef, %0, %1, i64 %2) @@ -48,6 +50,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv2i16.nxv2i8( , + , , i64); @@ -59,6 +62,7 @@ define @intrinsic_vwadd.w_wv_nxv2i16_nxv2i16_nxv2i8( @llvm.riscv.vwadd.w.nxv2i16.nxv2i8( + undef, %0, %1, i64 %2) @@ -93,6 +97,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv4i16.nxv4i8( , + , , i64); @@ -104,6 +109,7 @@ define @intrinsic_vwadd.w_wv_nxv4i16_nxv4i16_nxv4i8( @llvm.riscv.vwadd.w.nxv4i16.nxv4i8( + undef, %0, %1, i64 %2) @@ -138,6 +144,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv8i16.nxv8i8( , + , , i64); @@ -149,6 +156,7 @@ define @intrinsic_vwadd.w_wv_nxv8i16_nxv8i16_nxv8i8( @llvm.riscv.vwadd.w.nxv8i16.nxv8i8( + undef, %0, %1, i64 %2) @@ -183,6 +191,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv16i16.nxv16i8( , + , , i64); @@ -194,6 +203,7 @@ define @intrinsic_vwadd.w_wv_nxv16i16_nxv16i16_nxv16i8( @llvm.riscv.vwadd.w.nxv16i16.nxv16i8( + undef, %0, %1, i64 %2) @@ -228,6 +238,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv32i16.nxv32i8( , + , , i64); @@ -239,6 +250,7 @@ define @intrinsic_vwadd.w_wv_nxv32i16_nxv32i16_nxv32i8( @llvm.riscv.vwadd.w.nxv32i16.nxv32i8( + undef, %0, %1, i64 %2) @@ -274,6 +286,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv1i32.nxv1i16( , + , , i64); @@ -285,6 +298,7 @@ define @intrinsic_vwadd.w_wv_nxv1i32_nxv1i32_nxv1i16( @llvm.riscv.vwadd.w.nxv1i32.nxv1i16( + undef, %0, %1, i64 %2) @@ -319,6 +333,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv2i32.nxv2i16( , + , , i64); @@ -330,6 +345,7 @@ define @intrinsic_vwadd.w_wv_nxv2i32_nxv2i32_nxv2i16( @llvm.riscv.vwadd.w.nxv2i32.nxv2i16( + undef, %0, %1, i64 %2) @@ -364,6 +380,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv4i32.nxv4i16( , + , , i64); @@ -375,6 +392,7 @@ define @intrinsic_vwadd.w_wv_nxv4i32_nxv4i32_nxv4i16( @llvm.riscv.vwadd.w.nxv4i32.nxv4i16( + undef, %0, %1, i64 %2) @@ -409,6 +427,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv8i32.nxv8i16( , + , , i64); @@ -420,6 +439,7 @@ define @intrinsic_vwadd.w_wv_nxv8i32_nxv8i32_nxv8i16( @llvm.riscv.vwadd.w.nxv8i32.nxv8i16( + undef, %0, %1, i64 %2) @@ -454,6 +474,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv16i32.nxv16i16( , + , , i64); @@ -465,6 +486,7 @@ define @intrinsic_vwadd.w_wv_nxv16i32_nxv16i32_nxv16i16( @llvm.riscv.vwadd.w.nxv16i32.nxv16i16( + undef, %0, %1, i64 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv1i64.nxv1i32( , + , , i64); @@ -511,6 +534,7 @@ define @intrinsic_vwadd.w_wv_nxv1i64_nxv1i64_nxv1i32( @llvm.riscv.vwadd.w.nxv1i64.nxv1i32( + undef, %0, %1, i64 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv2i64.nxv2i32( , + , , i64); @@ -556,6 +581,7 @@ define @intrinsic_vwadd.w_wv_nxv2i64_nxv2i64_nxv2i32( 
@llvm.riscv.vwadd.w.nxv2i64.nxv2i32( + undef, %0, %1, i64 %2) @@ -590,6 +616,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv4i64.nxv4i32( , + , , i64); @@ -601,6 +628,7 @@ define @intrinsic_vwadd.w_wv_nxv4i64_nxv4i64_nxv4i32( @llvm.riscv.vwadd.w.nxv4i64.nxv4i32( + undef, %0, %1, i64 %2) @@ -635,6 +663,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv8i64.nxv8i32( , + , , i64); @@ -646,6 +675,7 @@ define @intrinsic_vwadd.w_wv_nxv8i64_nxv8i64_nxv8i32( @llvm.riscv.vwadd.w.nxv8i64.nxv8i32( + undef, %0, %1, i64 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv1i16.i8( , + , i8, i64); @@ -692,6 +723,7 @@ define @intrinsic_vwadd.w_wx_nxv1i16_nxv1i16_i8( @llvm.riscv.vwadd.w.nxv1i16.i8( + undef, %0, i8 %1, i64 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv2i16.i8( , + , i8, i64); @@ -737,6 +770,7 @@ define @intrinsic_vwadd.w_wx_nxv2i16_nxv2i16_i8( @llvm.riscv.vwadd.w.nxv2i16.i8( + undef, %0, i8 %1, i64 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv4i16.i8( , + , i8, i64); @@ -782,6 +817,7 @@ define @intrinsic_vwadd.w_wx_nxv4i16_nxv4i16_i8( @llvm.riscv.vwadd.w.nxv4i16.i8( + undef, %0, i8 %1, i64 %2) @@ -816,6 +852,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv8i16.i8( , + , i8, i64); @@ -827,6 +864,7 @@ define @intrinsic_vwadd.w_wx_nxv8i16_nxv8i16_i8( @llvm.riscv.vwadd.w.nxv8i16.i8( + undef, %0, i8 %1, i64 %2) @@ -861,6 +899,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv16i16.i8( , + , i8, i64); @@ -872,6 +911,7 @@ define @intrinsic_vwadd.w_wx_nxv16i16_nxv16i16_i8( @llvm.riscv.vwadd.w.nxv16i16.i8( + undef, %0, i8 %1, i64 %2) @@ -906,6 +946,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv32i16.i8( , + , i8, i64); @@ -917,6 +958,7 @@ define @intrinsic_vwadd.w_wx_nxv32i16_nxv32i16_i8( @llvm.riscv.vwadd.w.nxv32i16.i8( + undef, %0, i8 %1, i64 %2) @@ -951,6 +993,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv1i32.i16( , + , i16, i64); @@ -962,6 +1005,7 @@ define @intrinsic_vwadd.w_wx_nxv1i32_nxv1i32_i16( @llvm.riscv.vwadd.w.nxv1i32.i16( + undef, %0, i16 %1, i64 %2) @@ -996,6 +1040,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv2i32.i16( , + , i16, i64); @@ -1007,6 +1052,7 @@ define @intrinsic_vwadd.w_wx_nxv2i32_nxv2i32_i16( @llvm.riscv.vwadd.w.nxv2i32.i16( + undef, %0, i16 %1, i64 %2) @@ -1041,6 +1087,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv4i32.i16( , + , i16, i64); @@ -1052,6 +1099,7 @@ define @intrinsic_vwadd.w_wx_nxv4i32_nxv4i32_i16( @llvm.riscv.vwadd.w.nxv4i32.i16( + undef, %0, i16 %1, i64 %2) @@ -1086,6 +1134,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv8i32.i16( , + , i16, i64); @@ -1097,6 +1146,7 @@ define @intrinsic_vwadd.w_wx_nxv8i32_nxv8i32_i16( @llvm.riscv.vwadd.w.nxv8i32.i16( + undef, %0, i16 %1, i64 %2) @@ -1131,6 +1181,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv16i32.i16( , + , i16, i64); @@ -1142,6 +1193,7 @@ define @intrinsic_vwadd.w_wx_nxv16i32_nxv16i32_i16( @llvm.riscv.vwadd.w.nxv16i32.i16( + undef, %0, i16 %1, i64 %2) @@ -1176,6 +1228,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv1i64.i32( , + , i32, i64); @@ -1187,6 +1240,7 @@ define @intrinsic_vwadd.w_wx_nxv1i64_nxv1i64_i32( @llvm.riscv.vwadd.w.nxv1i64.i32( + undef, %0, i32 %1, i64 %2) @@ -1221,6 +1275,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv2i64.i32( , + , i32, i64); @@ -1232,6 +1287,7 @@ define @intrinsic_vwadd.w_wx_nxv2i64_nxv2i64_i32( @llvm.riscv.vwadd.w.nxv2i64.i32( + undef, %0, i32 %1, i64 %2) @@ -1266,6 +1322,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv4i64.i32( , + , i32, i64); @@ -1277,6 +1334,7 @@ define @intrinsic_vwadd.w_wx_nxv4i64_nxv4i64_i32( @llvm.riscv.vwadd.w.nxv4i64.i32( + undef, %0, i32 %1, i64 %2) 
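For readability, here is a minimal sketch of the shape these updated tests take, based on the first vwadd.w-rv64.ll case above; the <vscale x ...> element types and the define signature are inferred from the intrinsic and test names rather than copied verbatim, so treat this as illustrative only. The unmasked declaration gains a leading passthru vector parameter of the result type, and the call site passes undef for it ahead of the existing operands and the vl argument.

declare <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8(
  <vscale x 1 x i16>,   ; passthru (new leading operand, result type)
  <vscale x 1 x i16>,   ; wide source operand
  <vscale x 1 x i8>,    ; narrow source operand
  i64);                 ; vl (i32 in the rv32 variants)

define <vscale x 1 x i16> @intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i64 %2) {
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8(
    <vscale x 1 x i16> undef,  ; passthru left undef in these nomask tests
    <vscale x 1 x i16> %0,
    <vscale x 1 x i8> %1,
    i64 %2)
  ret <vscale x 1 x i16> %a
}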
@@ -1311,6 +1369,7 @@ entry: declare @llvm.riscv.vwadd.w.nxv8i64.i32( , + , i32, i64); @@ -1322,6 +1381,7 @@ define @intrinsic_vwadd.w_wx_nxv8i64_nxv8i64_i32( @llvm.riscv.vwadd.w.nxv8i64.i32( + undef, %0, i32 %1, i64 %2) @@ -1873,6 +1933,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv1i16_nxv1i16_nxv1i8( @llvm.riscv.vwadd.w.nxv1i16.nxv1i8( + undef, %1, %0, i64 %2) @@ -1889,6 +1950,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv2i16_nxv2i16_nxv2i8( @llvm.riscv.vwadd.w.nxv2i16.nxv2i8( + undef, %1, %0, i64 %2) @@ -1905,6 +1967,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv4i16_nxv4i16_nxv4i8( @llvm.riscv.vwadd.w.nxv4i16.nxv4i8( + undef, %1, %0, i64 %2) @@ -1921,6 +1984,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv8i16_nxv8i16_nxv8i8( @llvm.riscv.vwadd.w.nxv8i16.nxv8i8( + undef, %1, %0, i64 %2) @@ -1937,6 +2001,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv16i16_nxv16i16_nxv16i8 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.w.nxv16i16.nxv16i8( + undef, %1, %0, i64 %2) @@ -1953,6 +2018,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv32i16_nxv32i16_nxv32i8 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.w.nxv32i16.nxv32i8( + undef, %1, %0, i64 %2) @@ -1969,6 +2035,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv1i32_nxv1i32_nxv1i16( @llvm.riscv.vwadd.w.nxv1i32.nxv1i16( + undef, %1, %0, i64 %2) @@ -1985,6 +2052,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv2i32_nxv2i32_nxv2i16( @llvm.riscv.vwadd.w.nxv2i32.nxv2i16( + undef, %1, %0, i64 %2) @@ -2001,6 +2069,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv4i32_nxv4i32_nxv4i16( @llvm.riscv.vwadd.w.nxv4i32.nxv4i16( + undef, %1, %0, i64 %2) @@ -2017,6 +2086,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv8i32_nxv8i32_nxv8i16( @llvm.riscv.vwadd.w.nxv8i32.nxv8i16( + undef, %1, %0, i64 %2) @@ -2033,6 +2103,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv1i64_nxv1i64_nxv1i32( @llvm.riscv.vwadd.w.nxv1i64.nxv1i32( + undef, %1, %0, i64 %2) @@ -2049,6 +2120,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv2i64_nxv2i64_nxv2i32( @llvm.riscv.vwadd.w.nxv2i64.nxv2i32( + undef, %1, %0, i64 %2) @@ -2065,6 +2137,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv4i64_nxv4i64_nxv4i32( @llvm.riscv.vwadd.w.nxv4i64.nxv4i32( + undef, %1, %0, i64 %2) @@ -2081,6 +2154,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv8i64_nxv8i64_nxv8i32( @llvm.riscv.vwadd.w.nxv8i64.nxv8i32( + undef, %1, %0, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv32.ll index f37d6ed..a8c0d0c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8( + , , , i32); @@ -15,6 +16,7 @@ define @intrinsic_vwaddu_vv_nxv1i16_nxv1i8_nxv1i8( @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -48,6 +50,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8( + , , , i32); @@ -61,6 +64,7 @@ define @intrinsic_vwaddu_vv_nxv2i16_nxv2i8_nxv2i8( @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -94,6 +98,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8( + , , , i32); @@ -107,6 +112,7 @@ define @intrinsic_vwaddu_vv_nxv4i16_nxv4i8_nxv4i8( @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -140,6 +146,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8( + , , , i32); @@ -153,6 +160,7 @@ define @intrinsic_vwaddu_vv_nxv8i16_nxv8i8_nxv8i8( @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -186,6 +194,7 @@ entry: } 
declare @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8( + , , , i32); @@ -199,6 +208,7 @@ define @intrinsic_vwaddu_vv_nxv16i16_nxv16i8_nxv16i8( @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -232,6 +242,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8( + , , , i32); @@ -245,6 +256,7 @@ define @intrinsic_vwaddu_vv_nxv32i16_nxv32i8_nxv32i8( @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -278,6 +290,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16( + , , , i32); @@ -291,6 +304,7 @@ define @intrinsic_vwaddu_vv_nxv1i32_nxv1i16_nxv1i16( @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -324,6 +338,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16( + , , , i32); @@ -337,6 +352,7 @@ define @intrinsic_vwaddu_vv_nxv2i32_nxv2i16_nxv2i16( @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -370,6 +386,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16( + , , , i32); @@ -383,6 +400,7 @@ define @intrinsic_vwaddu_vv_nxv4i32_nxv4i16_nxv4i16( @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -416,6 +434,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16( + , , , i32); @@ -429,6 +448,7 @@ define @intrinsic_vwaddu_vv_nxv8i32_nxv8i16_nxv8i16( @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -462,6 +482,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16( + , , , i32); @@ -475,6 +496,7 @@ define @intrinsic_vwaddu_vv_nxv16i32_nxv16i16_nxv16i16( @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -508,6 +530,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32( + , , , i32); @@ -521,6 +544,7 @@ define @intrinsic_vwaddu_vv_nxv1i64_nxv1i32_nxv1i32( @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -554,6 +578,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32( + , , , i32); @@ -567,6 +592,7 @@ define @intrinsic_vwaddu_vv_nxv2i64_nxv2i32_nxv2i32( @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -600,6 +626,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32( + , , , i32); @@ -613,6 +640,7 @@ define @intrinsic_vwaddu_vv_nxv4i64_nxv4i32_nxv4i32( @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -646,6 +674,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32( + , , , i32); @@ -659,6 +688,7 @@ define @intrinsic_vwaddu_vv_nxv8i64_nxv8i32_nxv8i32( @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -692,6 +722,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8( + , , i8, i32); @@ -705,6 +736,7 @@ define @intrinsic_vwaddu_vx_nxv1i16_nxv1i8_i8( @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -738,6 +770,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8( + , , i8, i32); @@ -751,6 +784,7 @@ define @intrinsic_vwaddu_vx_nxv2i16_nxv2i8_i8( @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -784,6 +818,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8( + , , i8, i32); @@ -797,6 +832,7 @@ define @intrinsic_vwaddu_vx_nxv4i16_nxv4i8_i8( @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -830,6 +866,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8( + , , i8, i32); @@ -843,6 +880,7 @@ define @intrinsic_vwaddu_vx_nxv8i16_nxv8i8_i8( @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -876,6 +914,7 @@ entry: } declare 
@llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8( + , , i8, i32); @@ -889,6 +928,7 @@ define @intrinsic_vwaddu_vx_nxv16i16_nxv16i8_i8( @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -922,6 +962,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8( + , , i8, i32); @@ -935,6 +976,7 @@ define @intrinsic_vwaddu_vx_nxv32i16_nxv32i8_i8( @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -968,6 +1010,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16( + , , i16, i32); @@ -981,6 +1024,7 @@ define @intrinsic_vwaddu_vx_nxv1i32_nxv1i16_i16( @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1014,6 +1058,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16( + , , i16, i32); @@ -1027,6 +1072,7 @@ define @intrinsic_vwaddu_vx_nxv2i32_nxv2i16_i16( @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1060,6 +1106,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16( + , , i16, i32); @@ -1073,6 +1120,7 @@ define @intrinsic_vwaddu_vx_nxv4i32_nxv4i16_i16( @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1106,6 +1154,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16( + , , i16, i32); @@ -1119,6 +1168,7 @@ define @intrinsic_vwaddu_vx_nxv8i32_nxv8i16_i16( @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1152,6 +1202,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16( + , , i16, i32); @@ -1165,6 +1216,7 @@ define @intrinsic_vwaddu_vx_nxv16i32_nxv16i16_i16( @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1198,6 +1250,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32( + , , i32, i32); @@ -1211,6 +1264,7 @@ define @intrinsic_vwaddu_vx_nxv1i64_nxv1i32_i32( @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1244,6 +1298,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32( + , , i32, i32); @@ -1257,6 +1312,7 @@ define @intrinsic_vwaddu_vx_nxv2i64_nxv2i32_i32( @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1290,6 +1346,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32( + , , i32, i32); @@ -1303,6 +1360,7 @@ define @intrinsic_vwaddu_vx_nxv4i64_nxv4i32_i32( @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1336,6 +1394,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32( + , , i32, i32); @@ -1349,6 +1408,7 @@ define @intrinsic_vwaddu_vx_nxv8i64_nxv8i32_i32( @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv64.ll index 6c546a5..d5acb13 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8( + , , , i64); @@ -15,6 +16,7 @@ define @intrinsic_vwaddu_vv_nxv1i16_nxv1i8_nxv1i8( @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -48,6 +50,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8( + , , , i64); @@ -61,6 +64,7 @@ define @intrinsic_vwaddu_vv_nxv2i16_nxv2i8_nxv2i8( @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -94,6 +98,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8( + , , , i64); @@ -107,6 +112,7 @@ define @intrinsic_vwaddu_vv_nxv4i16_nxv4i8_nxv4i8( @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -140,6 
+146,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8( + , , , i64); @@ -153,6 +160,7 @@ define @intrinsic_vwaddu_vv_nxv8i16_nxv8i8_nxv8i8( @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -186,6 +194,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8( + , , , i64); @@ -199,6 +208,7 @@ define @intrinsic_vwaddu_vv_nxv16i16_nxv16i8_nxv16i8( @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -232,6 +242,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8( + , , , i64); @@ -245,6 +256,7 @@ define @intrinsic_vwaddu_vv_nxv32i16_nxv32i8_nxv32i8( @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -278,6 +290,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16( + , , , i64); @@ -291,6 +304,7 @@ define @intrinsic_vwaddu_vv_nxv1i32_nxv1i16_nxv1i16( @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -324,6 +338,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16( + , , , i64); @@ -337,6 +352,7 @@ define @intrinsic_vwaddu_vv_nxv2i32_nxv2i16_nxv2i16( @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -370,6 +386,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16( + , , , i64); @@ -383,6 +400,7 @@ define @intrinsic_vwaddu_vv_nxv4i32_nxv4i16_nxv4i16( @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -416,6 +434,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16( + , , , i64); @@ -429,6 +448,7 @@ define @intrinsic_vwaddu_vv_nxv8i32_nxv8i16_nxv8i16( @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -462,6 +482,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16( + , , , i64); @@ -475,6 +496,7 @@ define @intrinsic_vwaddu_vv_nxv16i32_nxv16i16_nxv16i16( @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -508,6 +530,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32( + , , , i64); @@ -521,6 +544,7 @@ define @intrinsic_vwaddu_vv_nxv1i64_nxv1i32_nxv1i32( @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -554,6 +578,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32( + , , , i64); @@ -567,6 +592,7 @@ define @intrinsic_vwaddu_vv_nxv2i64_nxv2i32_nxv2i32( @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -600,6 +626,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32( + , , , i64); @@ -613,6 +640,7 @@ define @intrinsic_vwaddu_vv_nxv4i64_nxv4i32_nxv4i32( @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -646,6 +674,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32( + , , , i64); @@ -659,6 +688,7 @@ define @intrinsic_vwaddu_vv_nxv8i64_nxv8i32_nxv8i32( @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -692,6 +722,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8( + , , i8, i64); @@ -705,6 +736,7 @@ define @intrinsic_vwaddu_vx_nxv1i16_nxv1i8_i8( @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -738,6 +770,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8( + , , i8, i64); @@ -751,6 +784,7 @@ define @intrinsic_vwaddu_vx_nxv2i16_nxv2i8_i8( @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -784,6 +818,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8( + , , i8, i64); @@ -797,6 +832,7 @@ define @intrinsic_vwaddu_vx_nxv4i16_nxv4i8_i8( @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -830,6 +866,7 @@ entry: } declare 
@llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8( + , , i8, i64); @@ -843,6 +880,7 @@ define @intrinsic_vwaddu_vx_nxv8i16_nxv8i8_i8( @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -876,6 +914,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8( + , , i8, i64); @@ -889,6 +928,7 @@ define @intrinsic_vwaddu_vx_nxv16i16_nxv16i8_i8( @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -922,6 +962,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8( + , , i8, i64); @@ -935,6 +976,7 @@ define @intrinsic_vwaddu_vx_nxv32i16_nxv32i8_i8( @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -968,6 +1010,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16( + , , i16, i64); @@ -981,6 +1024,7 @@ define @intrinsic_vwaddu_vx_nxv1i32_nxv1i16_i16( @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1014,6 +1058,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16( + , , i16, i64); @@ -1027,6 +1072,7 @@ define @intrinsic_vwaddu_vx_nxv2i32_nxv2i16_i16( @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1060,6 +1106,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16( + , , i16, i64); @@ -1073,6 +1120,7 @@ define @intrinsic_vwaddu_vx_nxv4i32_nxv4i16_i16( @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1106,6 +1154,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16( + , , i16, i64); @@ -1119,6 +1168,7 @@ define @intrinsic_vwaddu_vx_nxv8i32_nxv8i16_i16( @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1152,6 +1202,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16( + , , i16, i64); @@ -1165,6 +1216,7 @@ define @intrinsic_vwaddu_vx_nxv16i32_nxv16i16_i16( @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1198,6 +1250,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32( + , , i32, i64); @@ -1211,6 +1264,7 @@ define @intrinsic_vwaddu_vx_nxv1i64_nxv1i32_i32( @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1244,6 +1298,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32( + , , i32, i64); @@ -1257,6 +1312,7 @@ define @intrinsic_vwaddu_vx_nxv2i64_nxv2i32_i32( @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1290,6 +1346,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32( + , , i32, i64); @@ -1303,6 +1360,7 @@ define @intrinsic_vwaddu_vx_nxv4i64_nxv4i32_i32( @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1336,6 +1394,7 @@ entry: } declare @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32( + , , i32, i64); @@ -1349,6 +1408,7 @@ define @intrinsic_vwaddu_vx_nxv8i64_nxv8i32_i32( @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll index 1128135..ff0c070 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll @@ -3,6 +3,7 @@ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8( , + , , i32); @@ -14,6 +15,7 @@ define @intrinsic_vwaddu.w_wv_nxv1i16_nxv1i16_nxv1i8( @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8( + undef, %0, %1, i32 %2) @@ -48,6 +50,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8( , + , , i32); @@ -59,6 +62,7 @@ define @intrinsic_vwaddu.w_wv_nxv2i16_nxv2i16_nxv2i8( @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8( + undef, %0, %1, i32 %2) @@ -93,6 +97,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8( , + , , i32); @@ 
-104,6 +109,7 @@ define @intrinsic_vwaddu.w_wv_nxv4i16_nxv4i16_nxv4i8( @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8( + undef, %0, %1, i32 %2) @@ -138,6 +144,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8( , + , , i32); @@ -149,6 +156,7 @@ define @intrinsic_vwaddu.w_wv_nxv8i16_nxv8i16_nxv8i8( @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8( + undef, %0, %1, i32 %2) @@ -183,6 +191,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8( , + , , i32); @@ -194,6 +203,7 @@ define @intrinsic_vwaddu.w_wv_nxv16i16_nxv16i16_nxv16i8( @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8( + undef, %0, %1, i32 %2) @@ -228,6 +238,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8( , + , , i32); @@ -239,6 +250,7 @@ define @intrinsic_vwaddu.w_wv_nxv32i16_nxv32i16_nxv32i8( @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8( + undef, %0, %1, i32 %2) @@ -274,6 +286,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16( , + , , i32); @@ -285,6 +298,7 @@ define @intrinsic_vwaddu.w_wv_nxv1i32_nxv1i32_nxv1i16( @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16( + undef, %0, %1, i32 %2) @@ -319,6 +333,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16( , + , , i32); @@ -330,6 +345,7 @@ define @intrinsic_vwaddu.w_wv_nxv2i32_nxv2i32_nxv2i16( @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16( + undef, %0, %1, i32 %2) @@ -364,6 +380,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16( , + , , i32); @@ -375,6 +392,7 @@ define @intrinsic_vwaddu.w_wv_nxv4i32_nxv4i32_nxv4i16( @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16( + undef, %0, %1, i32 %2) @@ -409,6 +427,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16( , + , , i32); @@ -420,6 +439,7 @@ define @intrinsic_vwaddu.w_wv_nxv8i32_nxv8i32_nxv8i16( @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16( + undef, %0, %1, i32 %2) @@ -454,6 +474,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16( , + , , i32); @@ -465,6 +486,7 @@ define @intrinsic_vwaddu.w_wv_nxv16i32_nxv16i32_nxv16i16( @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16( + undef, %0, %1, i32 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32( , + , , i32); @@ -511,6 +534,7 @@ define @intrinsic_vwaddu.w_wv_nxv1i64_nxv1i64_nxv1i32( @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32( + undef, %0, %1, i32 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32( , + , , i32); @@ -556,6 +581,7 @@ define @intrinsic_vwaddu.w_wv_nxv2i64_nxv2i64_nxv2i32( @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32( + undef, %0, %1, i32 %2) @@ -590,6 +616,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32( , + , , i32); @@ -601,6 +628,7 @@ define @intrinsic_vwaddu.w_wv_nxv4i64_nxv4i64_nxv4i32( @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32( + undef, %0, %1, i32 %2) @@ -635,6 +663,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32( , + , , i32); @@ -646,6 +675,7 @@ define @intrinsic_vwaddu.w_wv_nxv8i64_nxv8i64_nxv8i32( @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32( + undef, %0, %1, i32 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv1i16.i8( , + , i8, i32); @@ -692,6 +723,7 @@ define @intrinsic_vwaddu.w_wx_nxv1i16_nxv1i16_i8( @llvm.riscv.vwaddu.w.nxv1i16.i8( + undef, %0, i8 %1, i32 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv2i16.i8( , + , i8, i32); @@ -737,6 +770,7 @@ define @intrinsic_vwaddu.w_wx_nxv2i16_nxv2i16_i8( @llvm.riscv.vwaddu.w.nxv2i16.i8( + undef, %0, i8 %1, i32 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv4i16.i8( , + , i8, i32); @@ -782,6 +817,7 @@ define @intrinsic_vwaddu.w_wx_nxv4i16_nxv4i16_i8( @llvm.riscv.vwaddu.w.nxv4i16.i8( + undef, %0, i8 %1, i32 %2) @@ -816,6 +852,7 @@ entry: declare 
@llvm.riscv.vwaddu.w.nxv8i16.i8( , + , i8, i32); @@ -827,6 +864,7 @@ define @intrinsic_vwaddu.w_wx_nxv8i16_nxv8i16_i8( @llvm.riscv.vwaddu.w.nxv8i16.i8( + undef, %0, i8 %1, i32 %2) @@ -861,6 +899,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv16i16.i8( , + , i8, i32); @@ -872,6 +911,7 @@ define @intrinsic_vwaddu.w_wx_nxv16i16_nxv16i16_i8( @llvm.riscv.vwaddu.w.nxv16i16.i8( + undef, %0, i8 %1, i32 %2) @@ -906,6 +946,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv32i16.i8( , + , i8, i32); @@ -917,6 +958,7 @@ define @intrinsic_vwaddu.w_wx_nxv32i16_nxv32i16_i8( @llvm.riscv.vwaddu.w.nxv32i16.i8( + undef, %0, i8 %1, i32 %2) @@ -951,6 +993,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv1i32.i16( , + , i16, i32); @@ -962,6 +1005,7 @@ define @intrinsic_vwaddu.w_wx_nxv1i32_nxv1i32_i16( @llvm.riscv.vwaddu.w.nxv1i32.i16( + undef, %0, i16 %1, i32 %2) @@ -996,6 +1040,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv2i32.i16( , + , i16, i32); @@ -1007,6 +1052,7 @@ define @intrinsic_vwaddu.w_wx_nxv2i32_nxv2i32_i16( @llvm.riscv.vwaddu.w.nxv2i32.i16( + undef, %0, i16 %1, i32 %2) @@ -1041,6 +1087,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv4i32.i16( , + , i16, i32); @@ -1052,6 +1099,7 @@ define @intrinsic_vwaddu.w_wx_nxv4i32_nxv4i32_i16( @llvm.riscv.vwaddu.w.nxv4i32.i16( + undef, %0, i16 %1, i32 %2) @@ -1086,6 +1134,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv8i32.i16( , + , i16, i32); @@ -1097,6 +1146,7 @@ define @intrinsic_vwaddu.w_wx_nxv8i32_nxv8i32_i16( @llvm.riscv.vwaddu.w.nxv8i32.i16( + undef, %0, i16 %1, i32 %2) @@ -1131,6 +1181,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv16i32.i16( , + , i16, i32); @@ -1142,6 +1193,7 @@ define @intrinsic_vwaddu.w_wx_nxv16i32_nxv16i32_i16( @llvm.riscv.vwaddu.w.nxv16i32.i16( + undef, %0, i16 %1, i32 %2) @@ -1176,6 +1228,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv1i64.i32( , + , i32, i32); @@ -1187,6 +1240,7 @@ define @intrinsic_vwaddu.w_wx_nxv1i64_nxv1i64_i32( @llvm.riscv.vwaddu.w.nxv1i64.i32( + undef, %0, i32 %1, i32 %2) @@ -1221,6 +1275,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv2i64.i32( , + , i32, i32); @@ -1232,6 +1287,7 @@ define @intrinsic_vwaddu.w_wx_nxv2i64_nxv2i64_i32( @llvm.riscv.vwaddu.w.nxv2i64.i32( + undef, %0, i32 %1, i32 %2) @@ -1266,6 +1322,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv4i64.i32( , + , i32, i32); @@ -1277,6 +1334,7 @@ define @intrinsic_vwaddu.w_wx_nxv4i64_nxv4i64_i32( @llvm.riscv.vwaddu.w.nxv4i64.i32( + undef, %0, i32 %1, i32 %2) @@ -1311,6 +1369,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv8i64.i32( , + , i32, i32); @@ -1322,6 +1381,7 @@ define @intrinsic_vwaddu.w_wx_nxv8i64_nxv8i64_i32( @llvm.riscv.vwaddu.w.nxv8i64.i32( + undef, %0, i32 %1, i32 %2) @@ -1873,6 +1933,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv1i16_nxv1i16_nxv1i8( @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8( + undef, %1, %0, i32 %2) @@ -1889,6 +1950,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv2i16_nxv2i16_nxv2i8( @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8( + undef, %1, %0, i32 %2) @@ -1905,6 +1967,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv4i16_nxv4i16_nxv4i8( @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8( + undef, %1, %0, i32 %2) @@ -1921,6 +1984,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv8i16_nxv8i16_nxv8i8( @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8( + undef, %1, %0, i32 %2) @@ -1937,6 +2001,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv16i16_nxv16i16_nxv16i ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8( + undef, %1, %0, i32 %2) @@ -1953,6 +2018,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv32i16_nxv32i16_nxv32i ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8( + 
undef, %1, %0, i32 %2) @@ -1969,6 +2035,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv1i32_nxv1i32_nxv1i16(< ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16( + undef, %1, %0, i32 %2) @@ -1985,6 +2052,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv2i32_nxv2i32_nxv2i16(< ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16( + undef, %1, %0, i32 %2) @@ -2001,6 +2069,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv4i32_nxv4i32_nxv4i16(< ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16( + undef, %1, %0, i32 %2) @@ -2017,6 +2086,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv8i32_nxv8i32_nxv8i16(< ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16( + undef, %1, %0, i32 %2) @@ -2033,6 +2103,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv1i64_nxv1i64_nxv1i32(< ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32( + undef, %1, %0, i32 %2) @@ -2049,6 +2120,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv2i64_nxv2i64_nxv2i32(< ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32( + undef, %1, %0, i32 %2) @@ -2065,6 +2137,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv4i64_nxv4i64_nxv4i32(< ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32( + undef, %1, %0, i32 %2) @@ -2081,6 +2154,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv8i64_nxv8i64_nxv8i32(< ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32( + undef, %1, %0, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll index e3840e3..486acc7 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll @@ -3,6 +3,7 @@ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8( , + , , i64); @@ -14,6 +15,7 @@ define @intrinsic_vwaddu.w_wv_nxv1i16_nxv1i16_nxv1i8( @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8( + undef, %0, %1, i64 %2) @@ -48,6 +50,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8( , + , , i64); @@ -59,6 +62,7 @@ define @intrinsic_vwaddu.w_wv_nxv2i16_nxv2i16_nxv2i8( @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8( + undef, %0, %1, i64 %2) @@ -93,6 +97,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8( , + , , i64); @@ -104,6 +109,7 @@ define @intrinsic_vwaddu.w_wv_nxv4i16_nxv4i16_nxv4i8( @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8( + undef, %0, %1, i64 %2) @@ -138,6 +144,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8( , + , , i64); @@ -149,6 +156,7 @@ define @intrinsic_vwaddu.w_wv_nxv8i16_nxv8i16_nxv8i8( @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8( + undef, %0, %1, i64 %2) @@ -183,6 +191,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8( , + , , i64); @@ -194,6 +203,7 @@ define @intrinsic_vwaddu.w_wv_nxv16i16_nxv16i16_nxv16i8( @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8( + undef, %0, %1, i64 %2) @@ -228,6 +238,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8( , + , , i64); @@ -239,6 +250,7 @@ define @intrinsic_vwaddu.w_wv_nxv32i16_nxv32i16_nxv32i8( @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8( + undef, %0, %1, i64 %2) @@ -274,6 +286,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16( , + , , i64); @@ -285,6 +298,7 @@ define @intrinsic_vwaddu.w_wv_nxv1i32_nxv1i32_nxv1i16( @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16( + undef, %0, %1, i64 %2) @@ -319,6 +333,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16( , + , , i64); @@ -330,6 +345,7 @@ define @intrinsic_vwaddu.w_wv_nxv2i32_nxv2i32_nxv2i16( @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16( + undef, %0, %1, i64 %2) @@ -364,6 +380,7 @@ entry: declare 
@llvm.riscv.vwaddu.w.nxv4i32.nxv4i16( , + , , i64); @@ -375,6 +392,7 @@ define @intrinsic_vwaddu.w_wv_nxv4i32_nxv4i32_nxv4i16( @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16( + undef, %0, %1, i64 %2) @@ -409,6 +427,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16( , + , , i64); @@ -420,6 +439,7 @@ define @intrinsic_vwaddu.w_wv_nxv8i32_nxv8i32_nxv8i16( @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16( + undef, %0, %1, i64 %2) @@ -454,6 +474,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16( , + , , i64); @@ -465,6 +486,7 @@ define @intrinsic_vwaddu.w_wv_nxv16i32_nxv16i32_nxv16i16( @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16( + undef, %0, %1, i64 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32( , + , , i64); @@ -511,6 +534,7 @@ define @intrinsic_vwaddu.w_wv_nxv1i64_nxv1i64_nxv1i32( @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32( + undef, %0, %1, i64 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32( , + , , i64); @@ -556,6 +581,7 @@ define @intrinsic_vwaddu.w_wv_nxv2i64_nxv2i64_nxv2i32( @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32( + undef, %0, %1, i64 %2) @@ -590,6 +616,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32( , + , , i64); @@ -601,6 +628,7 @@ define @intrinsic_vwaddu.w_wv_nxv4i64_nxv4i64_nxv4i32( @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32( + undef, %0, %1, i64 %2) @@ -635,6 +663,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32( , + , , i64); @@ -646,6 +675,7 @@ define @intrinsic_vwaddu.w_wv_nxv8i64_nxv8i64_nxv8i32( @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32( + undef, %0, %1, i64 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv1i16.i8( , + , i8, i64); @@ -692,6 +723,7 @@ define @intrinsic_vwaddu.w_wx_nxv1i16_nxv1i16_i8( @llvm.riscv.vwaddu.w.nxv1i16.i8( + undef, %0, i8 %1, i64 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv2i16.i8( , + , i8, i64); @@ -737,6 +770,7 @@ define @intrinsic_vwaddu.w_wx_nxv2i16_nxv2i16_i8( @llvm.riscv.vwaddu.w.nxv2i16.i8( + undef, %0, i8 %1, i64 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv4i16.i8( , + , i8, i64); @@ -782,6 +817,7 @@ define @intrinsic_vwaddu.w_wx_nxv4i16_nxv4i16_i8( @llvm.riscv.vwaddu.w.nxv4i16.i8( + undef, %0, i8 %1, i64 %2) @@ -816,6 +852,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv8i16.i8( , + , i8, i64); @@ -827,6 +864,7 @@ define @intrinsic_vwaddu.w_wx_nxv8i16_nxv8i16_i8( @llvm.riscv.vwaddu.w.nxv8i16.i8( + undef, %0, i8 %1, i64 %2) @@ -861,6 +899,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv16i16.i8( , + , i8, i64); @@ -872,6 +911,7 @@ define @intrinsic_vwaddu.w_wx_nxv16i16_nxv16i16_i8( @llvm.riscv.vwaddu.w.nxv16i16.i8( + undef, %0, i8 %1, i64 %2) @@ -906,6 +946,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv32i16.i8( , + , i8, i64); @@ -917,6 +958,7 @@ define @intrinsic_vwaddu.w_wx_nxv32i16_nxv32i16_i8( @llvm.riscv.vwaddu.w.nxv32i16.i8( + undef, %0, i8 %1, i64 %2) @@ -951,6 +993,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv1i32.i16( , + , i16, i64); @@ -962,6 +1005,7 @@ define @intrinsic_vwaddu.w_wx_nxv1i32_nxv1i32_i16( @llvm.riscv.vwaddu.w.nxv1i32.i16( + undef, %0, i16 %1, i64 %2) @@ -996,6 +1040,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv2i32.i16( , + , i16, i64); @@ -1007,6 +1052,7 @@ define @intrinsic_vwaddu.w_wx_nxv2i32_nxv2i32_i16( @llvm.riscv.vwaddu.w.nxv2i32.i16( + undef, %0, i16 %1, i64 %2) @@ -1041,6 +1087,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv4i32.i16( , + , i16, i64); @@ -1052,6 +1099,7 @@ define @intrinsic_vwaddu.w_wx_nxv4i32_nxv4i32_i16( @llvm.riscv.vwaddu.w.nxv4i32.i16( + undef, %0, i16 %1, i64 %2) @@ -1086,6 +1134,7 @@ entry: declare 
@llvm.riscv.vwaddu.w.nxv8i32.i16( , + , i16, i64); @@ -1097,6 +1146,7 @@ define @intrinsic_vwaddu.w_wx_nxv8i32_nxv8i32_i16( @llvm.riscv.vwaddu.w.nxv8i32.i16( + undef, %0, i16 %1, i64 %2) @@ -1131,6 +1181,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv16i32.i16( , + , i16, i64); @@ -1142,6 +1193,7 @@ define @intrinsic_vwaddu.w_wx_nxv16i32_nxv16i32_i16( @llvm.riscv.vwaddu.w.nxv16i32.i16( + undef, %0, i16 %1, i64 %2) @@ -1176,6 +1228,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv1i64.i32( , + , i32, i64); @@ -1187,6 +1240,7 @@ define @intrinsic_vwaddu.w_wx_nxv1i64_nxv1i64_i32( @llvm.riscv.vwaddu.w.nxv1i64.i32( + undef, %0, i32 %1, i64 %2) @@ -1221,6 +1275,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv2i64.i32( , + , i32, i64); @@ -1232,6 +1287,7 @@ define @intrinsic_vwaddu.w_wx_nxv2i64_nxv2i64_i32( @llvm.riscv.vwaddu.w.nxv2i64.i32( + undef, %0, i32 %1, i64 %2) @@ -1266,6 +1322,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv4i64.i32( , + , i32, i64); @@ -1277,6 +1334,7 @@ define @intrinsic_vwaddu.w_wx_nxv4i64_nxv4i64_i32( @llvm.riscv.vwaddu.w.nxv4i64.i32( + undef, %0, i32 %1, i64 %2) @@ -1311,6 +1369,7 @@ entry: declare @llvm.riscv.vwaddu.w.nxv8i64.i32( , + , i32, i64); @@ -1322,6 +1381,7 @@ define @intrinsic_vwaddu.w_wx_nxv8i64_nxv8i64_i32( @llvm.riscv.vwaddu.w.nxv8i64.i32( + undef, %0, i32 %1, i64 %2) @@ -1873,6 +1933,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv1i16_nxv1i16_nxv1i8( @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8( + undef, %1, %0, i64 %2) @@ -1889,6 +1950,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv2i16_nxv2i16_nxv2i8( @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8( + undef, %1, %0, i64 %2) @@ -1905,6 +1967,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv4i16_nxv4i16_nxv4i8( @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8( + undef, %1, %0, i64 %2) @@ -1921,6 +1984,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv8i16_nxv8i16_nxv8i8( @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8( + undef, %1, %0, i64 %2) @@ -1937,6 +2001,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv16i16_nxv16i16_nxv16i ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8( + undef, %1, %0, i64 %2) @@ -1953,6 +2018,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv32i16_nxv32i16_nxv32i ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8( + undef, %1, %0, i64 %2) @@ -1969,6 +2035,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv1i32_nxv1i32_nxv1i16(< ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16( + undef, %1, %0, i64 %2) @@ -1985,6 +2052,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv2i32_nxv2i32_nxv2i16(< ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16( + undef, %1, %0, i64 %2) @@ -2001,6 +2069,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv4i32_nxv4i32_nxv4i16(< ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16( + undef, %1, %0, i64 %2) @@ -2017,6 +2086,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv8i32_nxv8i32_nxv8i16(< ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16( + undef, %1, %0, i64 %2) @@ -2033,6 +2103,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv1i64_nxv1i64_nxv1i32(< ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32( + undef, %1, %0, i64 %2) @@ -2049,6 +2120,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv2i64_nxv2i64_nxv2i32(< ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32( + undef, %1, %0, i64 %2) @@ -2065,6 +2137,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv4i64_nxv4i64_nxv4i32(< ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32( + undef, %1, %0, i64 %2) @@ -2081,6 +2154,7 @@ define 
@intrinsic_vwaddu.w_wv_untie_nxv8i64_nxv8i64_nxv8i32(< ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32( + undef, %1, %0, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmul-rv32.ll index 338e4b3..1484ded 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwmul-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmul-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8( + , , , i32); @@ -15,6 +16,7 @@ define @intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8( @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -48,6 +50,7 @@ entry: } declare @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8( + , , , i32); @@ -61,6 +64,7 @@ define @intrinsic_vwmul_vv_nxv2i16_nxv2i8_nxv2i8( @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -94,6 +98,7 @@ entry: } declare @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8( + , , , i32); @@ -107,6 +112,7 @@ define @intrinsic_vwmul_vv_nxv4i16_nxv4i8_nxv4i8( @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -140,6 +146,7 @@ entry: } declare @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8( + , , , i32); @@ -153,6 +160,7 @@ define @intrinsic_vwmul_vv_nxv8i16_nxv8i8_nxv8i8( @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -186,6 +194,7 @@ entry: } declare @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8( + , , , i32); @@ -199,6 +208,7 @@ define @intrinsic_vwmul_vv_nxv16i16_nxv16i8_nxv16i8( @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -232,6 +242,7 @@ entry: } declare @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8( + , , , i32); @@ -245,6 +256,7 @@ define @intrinsic_vwmul_vv_nxv32i16_nxv32i8_nxv32i8( @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -278,6 +290,7 @@ entry: } declare @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16( + , , , i32); @@ -291,6 +304,7 @@ define @intrinsic_vwmul_vv_nxv1i32_nxv1i16_nxv1i16( @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -324,6 +338,7 @@ entry: } declare @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16( + , , , i32); @@ -337,6 +352,7 @@ define @intrinsic_vwmul_vv_nxv2i32_nxv2i16_nxv2i16( @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -370,6 +386,7 @@ entry: } declare @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16( + , , , i32); @@ -383,6 +400,7 @@ define @intrinsic_vwmul_vv_nxv4i32_nxv4i16_nxv4i16( @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -416,6 +434,7 @@ entry: } declare @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16( + , , , i32); @@ -429,6 +448,7 @@ define @intrinsic_vwmul_vv_nxv8i32_nxv8i16_nxv8i16( @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -462,6 +482,7 @@ entry: } declare @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16( + , , , i32); @@ -475,6 +496,7 @@ define @intrinsic_vwmul_vv_nxv16i32_nxv16i16_nxv16i16( @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -508,6 +530,7 @@ entry: } declare @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32( + , , , i32); @@ -521,6 +544,7 @@ define @intrinsic_vwmul_vv_nxv1i64_nxv1i32_nxv1i32( @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -554,6 +578,7 @@ entry: } declare @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32( + , , , i32); @@ -567,6 +592,7 @@ define @intrinsic_vwmul_vv_nxv2i64_nxv2i32_nxv2i32( @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -600,6 +626,7 @@ entry: } declare 
@llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32( + , , , i32); @@ -613,6 +640,7 @@ define @intrinsic_vwmul_vv_nxv4i64_nxv4i32_nxv4i32( @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -646,6 +674,7 @@ entry: } declare @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32( + , , , i32); @@ -659,6 +688,7 @@ define @intrinsic_vwmul_vv_nxv8i64_nxv8i32_nxv8i32( @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -692,6 +722,7 @@ entry: } declare @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8( + , , i8, i32); @@ -705,6 +736,7 @@ define @intrinsic_vwmul_vx_nxv1i16_nxv1i8_i8( @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -738,6 +770,7 @@ entry: } declare @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8( + , , i8, i32); @@ -751,6 +784,7 @@ define @intrinsic_vwmul_vx_nxv2i16_nxv2i8_i8( @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -784,6 +818,7 @@ entry: } declare @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8( + , , i8, i32); @@ -797,6 +832,7 @@ define @intrinsic_vwmul_vx_nxv4i16_nxv4i8_i8( @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -830,6 +866,7 @@ entry: } declare @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8( + , , i8, i32); @@ -843,6 +880,7 @@ define @intrinsic_vwmul_vx_nxv8i16_nxv8i8_i8( @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -876,6 +914,7 @@ entry: } declare @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8( + , , i8, i32); @@ -889,6 +928,7 @@ define @intrinsic_vwmul_vx_nxv16i16_nxv16i8_i8( @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -922,6 +962,7 @@ entry: } declare @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8( + , , i8, i32); @@ -935,6 +976,7 @@ define @intrinsic_vwmul_vx_nxv32i16_nxv32i8_i8( @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -968,6 +1010,7 @@ entry: } declare @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16( + , , i16, i32); @@ -981,6 +1024,7 @@ define @intrinsic_vwmul_vx_nxv1i32_nxv1i16_i16( @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1014,6 +1058,7 @@ entry: } declare @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16( + , , i16, i32); @@ -1027,6 +1072,7 @@ define @intrinsic_vwmul_vx_nxv2i32_nxv2i16_i16( @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1060,6 +1106,7 @@ entry: } declare @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16( + , , i16, i32); @@ -1073,6 +1120,7 @@ define @intrinsic_vwmul_vx_nxv4i32_nxv4i16_i16( @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1106,6 +1154,7 @@ entry: } declare @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16( + , , i16, i32); @@ -1119,6 +1168,7 @@ define @intrinsic_vwmul_vx_nxv8i32_nxv8i16_i16( @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1152,6 +1202,7 @@ entry: } declare @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16( + , , i16, i32); @@ -1165,6 +1216,7 @@ define @intrinsic_vwmul_vx_nxv16i32_nxv16i16_i16( @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1198,6 +1250,7 @@ entry: } declare @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32( + , , i32, i32); @@ -1211,6 +1264,7 @@ define @intrinsic_vwmul_vx_nxv1i64_nxv1i32_i32( @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1244,6 +1298,7 @@ entry: } declare @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32( + , , i32, i32); @@ -1257,6 +1312,7 @@ define @intrinsic_vwmul_vx_nxv2i64_nxv2i32_i32( @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1290,6 +1346,7 @@ entry: } declare @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32( + , , i32, i32); @@ -1303,6 +1360,7 @@ define 
@intrinsic_vwmul_vx_nxv4i64_nxv4i32_i32( @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1336,6 +1394,7 @@ entry: } declare @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32( + , , i32, i32); @@ -1349,6 +1408,7 @@ define @intrinsic_vwmul_vx_nxv8i64_nxv8i32_i32( @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmul-rv64.ll index da5accc..8d56ba9 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwmul-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmul-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8( + , , , i64); @@ -15,6 +16,7 @@ define @intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8( @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -48,6 +50,7 @@ entry: } declare @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8( + , , , i64); @@ -61,6 +64,7 @@ define @intrinsic_vwmul_vv_nxv2i16_nxv2i8_nxv2i8( @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -94,6 +98,7 @@ entry: } declare @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8( + , , , i64); @@ -107,6 +112,7 @@ define @intrinsic_vwmul_vv_nxv4i16_nxv4i8_nxv4i8( @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -140,6 +146,7 @@ entry: } declare @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8( + , , , i64); @@ -153,6 +160,7 @@ define @intrinsic_vwmul_vv_nxv8i16_nxv8i8_nxv8i8( @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -186,6 +194,7 @@ entry: } declare @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8( + , , , i64); @@ -199,6 +208,7 @@ define @intrinsic_vwmul_vv_nxv16i16_nxv16i8_nxv16i8( @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -232,6 +242,7 @@ entry: } declare @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8( + , , , i64); @@ -245,6 +256,7 @@ define @intrinsic_vwmul_vv_nxv32i16_nxv32i8_nxv32i8( @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -278,6 +290,7 @@ entry: } declare @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16( + , , , i64); @@ -291,6 +304,7 @@ define @intrinsic_vwmul_vv_nxv1i32_nxv1i16_nxv1i16( @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -324,6 +338,7 @@ entry: } declare @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16( + , , , i64); @@ -337,6 +352,7 @@ define @intrinsic_vwmul_vv_nxv2i32_nxv2i16_nxv2i16( @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -370,6 +386,7 @@ entry: } declare @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16( + , , , i64); @@ -383,6 +400,7 @@ define @intrinsic_vwmul_vv_nxv4i32_nxv4i16_nxv4i16( @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -416,6 +434,7 @@ entry: } declare @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16( + , , , i64); @@ -429,6 +448,7 @@ define @intrinsic_vwmul_vv_nxv8i32_nxv8i16_nxv8i16( @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -462,6 +482,7 @@ entry: } declare @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16( + , , , i64); @@ -475,6 +496,7 @@ define @intrinsic_vwmul_vv_nxv16i32_nxv16i16_nxv16i16( @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -508,6 +530,7 @@ entry: } declare @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32( + , , , i64); @@ -521,6 +544,7 @@ define @intrinsic_vwmul_vv_nxv1i64_nxv1i32_nxv1i32( @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -554,6 +578,7 @@ entry: } declare @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32( + , , , i64); @@ -567,6 +592,7 @@ define 
@intrinsic_vwmul_vv_nxv2i64_nxv2i32_nxv2i32( @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -600,6 +626,7 @@ entry: } declare @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32( + , , , i64); @@ -613,6 +640,7 @@ define @intrinsic_vwmul_vv_nxv4i64_nxv4i32_nxv4i32( @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -646,6 +674,7 @@ entry: } declare @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32( + , , , i64); @@ -659,6 +688,7 @@ define @intrinsic_vwmul_vv_nxv8i64_nxv8i32_nxv8i32( @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -692,6 +722,7 @@ entry: } declare @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8( + , , i8, i64); @@ -705,6 +736,7 @@ define @intrinsic_vwmul_vx_nxv1i16_nxv1i8_i8( @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -738,6 +770,7 @@ entry: } declare @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8( + , , i8, i64); @@ -751,6 +784,7 @@ define @intrinsic_vwmul_vx_nxv2i16_nxv2i8_i8( @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -784,6 +818,7 @@ entry: } declare @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8( + , , i8, i64); @@ -797,6 +832,7 @@ define @intrinsic_vwmul_vx_nxv4i16_nxv4i8_i8( @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -830,6 +866,7 @@ entry: } declare @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8( + , , i8, i64); @@ -843,6 +880,7 @@ define @intrinsic_vwmul_vx_nxv8i16_nxv8i8_i8( @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -876,6 +914,7 @@ entry: } declare @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8( + , , i8, i64); @@ -889,6 +928,7 @@ define @intrinsic_vwmul_vx_nxv16i16_nxv16i8_i8( @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -922,6 +962,7 @@ entry: } declare @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8( + , , i8, i64); @@ -935,6 +976,7 @@ define @intrinsic_vwmul_vx_nxv32i16_nxv32i8_i8( @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -968,6 +1010,7 @@ entry: } declare @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16( + , , i16, i64); @@ -981,6 +1024,7 @@ define @intrinsic_vwmul_vx_nxv1i32_nxv1i16_i16( @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1014,6 +1058,7 @@ entry: } declare @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16( + , , i16, i64); @@ -1027,6 +1072,7 @@ define @intrinsic_vwmul_vx_nxv2i32_nxv2i16_i16( @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1060,6 +1106,7 @@ entry: } declare @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16( + , , i16, i64); @@ -1073,6 +1120,7 @@ define @intrinsic_vwmul_vx_nxv4i32_nxv4i16_i16( @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1106,6 +1154,7 @@ entry: } declare @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16( + , , i16, i64); @@ -1119,6 +1168,7 @@ define @intrinsic_vwmul_vx_nxv8i32_nxv8i16_i16( @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1152,6 +1202,7 @@ entry: } declare @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16( + , , i16, i64); @@ -1165,6 +1216,7 @@ define @intrinsic_vwmul_vx_nxv16i32_nxv16i16_i16( @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1198,6 +1250,7 @@ entry: } declare @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32( + , , i32, i64); @@ -1211,6 +1264,7 @@ define @intrinsic_vwmul_vx_nxv1i64_nxv1i32_i32( @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1244,6 +1298,7 @@ entry: } declare @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32( + , , i32, i64); @@ -1257,6 +1312,7 @@ define @intrinsic_vwmul_vx_nxv2i64_nxv2i32_i32( @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) 
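The vector-scalar (vx) forms in vwmul-rv64.ll follow the same pattern; a sketch of the case ending the preceding hunk, again with element types inferred from the intrinsic name and therefore illustrative only: the passthru uses the widened result type while the source operands stay narrow.

declare <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32(
  <vscale x 2 x i64>,   ; passthru (new leading operand, widened result type)
  <vscale x 2 x i32>,   ; narrow vector operand
  i32,                  ; scalar operand
  i64);                 ; vl

define <vscale x 2 x i64> @intrinsic_vwmul_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) {
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32(
    <vscale x 2 x i64> undef,  ; passthru left undef in the nomask tests
    <vscale x 2 x i32> %0,
    i32 %1,
    i64 %2)
  ret <vscale x 2 x i64> %a
}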
@@ -1290,6 +1346,7 @@ entry: } declare @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32( + , , i32, i64); @@ -1303,6 +1360,7 @@ define @intrinsic_vwmul_vx_nxv4i64_nxv4i32_i32( @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1336,6 +1394,7 @@ entry: } declare @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32( + , , i32, i64); @@ -1349,6 +1408,7 @@ define @intrinsic_vwmul_vx_nxv8i64_nxv8i32_i32( @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll index 4f480eb..1edfa38 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8( + , , , i32); @@ -15,6 +16,7 @@ define @intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8( @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -48,6 +50,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8( + , , , i32); @@ -61,6 +64,7 @@ define @intrinsic_vwmulsu_vv_nxv2i16_nxv2i8_nxv2i8( @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -94,6 +98,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8( + , , , i32); @@ -107,6 +112,7 @@ define @intrinsic_vwmulsu_vv_nxv4i16_nxv4i8_nxv4i8( @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -140,6 +146,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8( + , , , i32); @@ -153,6 +160,7 @@ define @intrinsic_vwmulsu_vv_nxv8i16_nxv8i8_nxv8i8( @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -186,6 +194,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8( + , , , i32); @@ -199,6 +208,7 @@ define @intrinsic_vwmulsu_vv_nxv16i16_nxv16i8_nxv16i8( @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -232,6 +242,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8( + , , , i32); @@ -245,6 +256,7 @@ define @intrinsic_vwmulsu_vv_nxv32i16_nxv32i8_nxv32i8( @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -278,6 +290,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16( + , , , i32); @@ -291,6 +304,7 @@ define @intrinsic_vwmulsu_vv_nxv1i32_nxv1i16_nxv1i16( @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -324,6 +338,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16( + , , , i32); @@ -337,6 +352,7 @@ define @intrinsic_vwmulsu_vv_nxv2i32_nxv2i16_nxv2i16( @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -370,6 +386,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16( + , , , i32); @@ -383,6 +400,7 @@ define @intrinsic_vwmulsu_vv_nxv4i32_nxv4i16_nxv4i16( @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -416,6 +434,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16( + , , , i32); @@ -429,6 +448,7 @@ define @intrinsic_vwmulsu_vv_nxv8i32_nxv8i16_nxv8i16( @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -462,6 +482,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16( + , , , i32); @@ -475,6 +496,7 @@ define @intrinsic_vwmulsu_vv_nxv16i32_nxv16i16_nxv16i16( @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -508,6 +530,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32( + , , , i32); @@ -521,6 +544,7 @@ define 
@intrinsic_vwmulsu_vv_nxv1i64_nxv1i32_nxv1i32( @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -554,6 +578,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32( + , , , i32); @@ -567,6 +592,7 @@ define @intrinsic_vwmulsu_vv_nxv2i64_nxv2i32_nxv2i32( @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -600,6 +626,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32( + , , , i32); @@ -613,6 +640,7 @@ define @intrinsic_vwmulsu_vv_nxv4i64_nxv4i32_nxv4i32( @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -646,6 +674,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32( + , , , i32); @@ -659,6 +688,7 @@ define @intrinsic_vwmulsu_vv_nxv8i64_nxv8i32_nxv8i32( @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -692,6 +722,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8( + , , i8, i32); @@ -705,6 +736,7 @@ define @intrinsic_vwmulsu_vx_nxv1i16_nxv1i8_i8( @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -738,6 +770,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8( + , , i8, i32); @@ -751,6 +784,7 @@ define @intrinsic_vwmulsu_vx_nxv2i16_nxv2i8_i8( @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -784,6 +818,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8( + , , i8, i32); @@ -797,6 +832,7 @@ define @intrinsic_vwmulsu_vx_nxv4i16_nxv4i8_i8( @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -830,6 +866,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8( + , , i8, i32); @@ -843,6 +880,7 @@ define @intrinsic_vwmulsu_vx_nxv8i16_nxv8i8_i8( @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -876,6 +914,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8( + , , i8, i32); @@ -889,6 +928,7 @@ define @intrinsic_vwmulsu_vx_nxv16i16_nxv16i8_i8( @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -922,6 +962,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8( + , , i8, i32); @@ -935,6 +976,7 @@ define @intrinsic_vwmulsu_vx_nxv32i16_nxv32i8_i8( @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -968,6 +1010,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16( + , , i16, i32); @@ -981,6 +1024,7 @@ define @intrinsic_vwmulsu_vx_nxv1i32_nxv1i16_i16( @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1014,6 +1058,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16( + , , i16, i32); @@ -1027,6 +1072,7 @@ define @intrinsic_vwmulsu_vx_nxv2i32_nxv2i16_i16( @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1060,6 +1106,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16( + , , i16, i32); @@ -1073,6 +1120,7 @@ define @intrinsic_vwmulsu_vx_nxv4i32_nxv4i16_i16( @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1106,6 +1154,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16( + , , i16, i32); @@ -1119,6 +1168,7 @@ define @intrinsic_vwmulsu_vx_nxv8i32_nxv8i16_i16( @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1152,6 +1202,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16( + , , i16, i32); @@ -1165,6 +1216,7 @@ define @intrinsic_vwmulsu_vx_nxv16i32_nxv16i16_i16( @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1198,6 +1250,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32( + , , i32, i32); @@ -1211,6 +1264,7 @@ define 
@intrinsic_vwmulsu_vx_nxv1i64_nxv1i32_i32( @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1244,6 +1298,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32( + , , i32, i32); @@ -1257,6 +1312,7 @@ define @intrinsic_vwmulsu_vx_nxv2i64_nxv2i32_i32( @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1290,6 +1346,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32( + , , i32, i32); @@ -1303,6 +1360,7 @@ define @intrinsic_vwmulsu_vx_nxv4i64_nxv4i32_i32( @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1336,6 +1394,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32( + , , i32, i32); @@ -1349,6 +1408,7 @@ define @intrinsic_vwmulsu_vx_nxv8i64_nxv8i32_i32( @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll index da7d9b9..f821a23 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8( + , , , i64); @@ -15,6 +16,7 @@ define @intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8( @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -48,6 +50,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8( + , , , i64); @@ -61,6 +64,7 @@ define @intrinsic_vwmulsu_vv_nxv2i16_nxv2i8_nxv2i8( @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -94,6 +98,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8( + , , , i64); @@ -107,6 +112,7 @@ define @intrinsic_vwmulsu_vv_nxv4i16_nxv4i8_nxv4i8( @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -140,6 +146,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8( + , , , i64); @@ -153,6 +160,7 @@ define @intrinsic_vwmulsu_vv_nxv8i16_nxv8i8_nxv8i8( @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -186,6 +194,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8( + , , , i64); @@ -199,6 +208,7 @@ define @intrinsic_vwmulsu_vv_nxv16i16_nxv16i8_nxv16i8( @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -232,6 +242,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8( + , , , i64); @@ -245,6 +256,7 @@ define @intrinsic_vwmulsu_vv_nxv32i16_nxv32i8_nxv32i8( @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -278,6 +290,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16( + , , , i64); @@ -291,6 +304,7 @@ define @intrinsic_vwmulsu_vv_nxv1i32_nxv1i16_nxv1i16( @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -324,6 +338,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16( + , , , i64); @@ -337,6 +352,7 @@ define @intrinsic_vwmulsu_vv_nxv2i32_nxv2i16_nxv2i16( @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -370,6 +386,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16( + , , , i64); @@ -383,6 +400,7 @@ define @intrinsic_vwmulsu_vv_nxv4i32_nxv4i16_nxv4i16( @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -416,6 +434,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16( + , , , i64); @@ -429,6 +448,7 @@ define @intrinsic_vwmulsu_vv_nxv8i32_nxv8i16_nxv8i16( @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -462,6 +482,7 @@ entry: } declare 
@llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16( + , , , i64); @@ -475,6 +496,7 @@ define @intrinsic_vwmulsu_vv_nxv16i32_nxv16i16_nxv16i16( @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -508,6 +530,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32( + , , , i64); @@ -521,6 +544,7 @@ define @intrinsic_vwmulsu_vv_nxv1i64_nxv1i32_nxv1i32( @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -554,6 +578,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32( + , , , i64); @@ -567,6 +592,7 @@ define @intrinsic_vwmulsu_vv_nxv2i64_nxv2i32_nxv2i32( @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -600,6 +626,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32( + , , , i64); @@ -613,6 +640,7 @@ define @intrinsic_vwmulsu_vv_nxv4i64_nxv4i32_nxv4i32( @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -646,6 +674,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32( + , , , i64); @@ -659,6 +688,7 @@ define @intrinsic_vwmulsu_vv_nxv8i64_nxv8i32_nxv8i32( @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -692,6 +722,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8( + , , i8, i64); @@ -705,6 +736,7 @@ define @intrinsic_vwmulsu_vx_nxv1i16_nxv1i8_i8( @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -738,6 +770,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8( + , , i8, i64); @@ -751,6 +784,7 @@ define @intrinsic_vwmulsu_vx_nxv2i16_nxv2i8_i8( @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -784,6 +818,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8( + , , i8, i64); @@ -797,6 +832,7 @@ define @intrinsic_vwmulsu_vx_nxv4i16_nxv4i8_i8( @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -830,6 +866,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8( + , , i8, i64); @@ -843,6 +880,7 @@ define @intrinsic_vwmulsu_vx_nxv8i16_nxv8i8_i8( @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -876,6 +914,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8( + , , i8, i64); @@ -889,6 +928,7 @@ define @intrinsic_vwmulsu_vx_nxv16i16_nxv16i8_i8( @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -922,6 +962,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8( + , , i8, i64); @@ -935,6 +976,7 @@ define @intrinsic_vwmulsu_vx_nxv32i16_nxv32i8_i8( @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -968,6 +1010,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16( + , , i16, i64); @@ -981,6 +1024,7 @@ define @intrinsic_vwmulsu_vx_nxv1i32_nxv1i16_i16( @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1014,6 +1058,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16( + , , i16, i64); @@ -1027,6 +1072,7 @@ define @intrinsic_vwmulsu_vx_nxv2i32_nxv2i16_i16( @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1060,6 +1106,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16( + , , i16, i64); @@ -1073,6 +1120,7 @@ define @intrinsic_vwmulsu_vx_nxv4i32_nxv4i16_i16( @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1106,6 +1154,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16( + , , i16, i64); @@ -1119,6 +1168,7 @@ define @intrinsic_vwmulsu_vx_nxv8i32_nxv8i16_i16( @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1152,6 +1202,7 @@ entry: } declare 
@llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16( + , , i16, i64); @@ -1165,6 +1216,7 @@ define @intrinsic_vwmulsu_vx_nxv16i32_nxv16i16_i16( @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1198,6 +1250,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32( + , , i32, i64); @@ -1211,6 +1264,7 @@ define @intrinsic_vwmulsu_vx_nxv1i64_nxv1i32_i32( @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1244,6 +1298,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32( + , , i32, i64); @@ -1257,6 +1312,7 @@ define @intrinsic_vwmulsu_vx_nxv2i64_nxv2i32_i32( @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1290,6 +1346,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32( + , , i32, i64); @@ -1303,6 +1360,7 @@ define @intrinsic_vwmulsu_vx_nxv4i64_nxv4i32_i32( @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1336,6 +1394,7 @@ entry: } declare @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32( + , , i32, i64); @@ -1349,6 +1408,7 @@ define @intrinsic_vwmulsu_vx_nxv8i64_nxv8i32_i32( @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll index 11ac174..ce02e39 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8( + , , , i32); @@ -15,6 +16,7 @@ define @intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8( @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -48,6 +50,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8( + , , , i32); @@ -61,6 +64,7 @@ define @intrinsic_vwmulu_vv_nxv2i16_nxv2i8_nxv2i8( @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -94,6 +98,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8( + , , , i32); @@ -107,6 +112,7 @@ define @intrinsic_vwmulu_vv_nxv4i16_nxv4i8_nxv4i8( @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -140,6 +146,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8( + , , , i32); @@ -153,6 +160,7 @@ define @intrinsic_vwmulu_vv_nxv8i16_nxv8i8_nxv8i8( @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -186,6 +194,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8( + , , , i32); @@ -199,6 +208,7 @@ define @intrinsic_vwmulu_vv_nxv16i16_nxv16i8_nxv16i8( @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -232,6 +242,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8( + , , , i32); @@ -245,6 +256,7 @@ define @intrinsic_vwmulu_vv_nxv32i16_nxv32i8_nxv32i8( @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -278,6 +290,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16( + , , , i32); @@ -291,6 +304,7 @@ define @intrinsic_vwmulu_vv_nxv1i32_nxv1i16_nxv1i16( @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -324,6 +338,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16( + , , , i32); @@ -337,6 +352,7 @@ define @intrinsic_vwmulu_vv_nxv2i32_nxv2i16_nxv2i16( @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -370,6 +386,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16( + , , , i32); @@ -383,6 +400,7 @@ define @intrinsic_vwmulu_vv_nxv4i32_nxv4i16_nxv4i16( @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16( + 
undef, %0, %1, i32 %2) @@ -416,6 +434,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16( + , , , i32); @@ -429,6 +448,7 @@ define @intrinsic_vwmulu_vv_nxv8i32_nxv8i16_nxv8i16( @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -462,6 +482,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16( + , , , i32); @@ -475,6 +496,7 @@ define @intrinsic_vwmulu_vv_nxv16i32_nxv16i16_nxv16i16( @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -508,6 +530,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32( + , , , i32); @@ -521,6 +544,7 @@ define @intrinsic_vwmulu_vv_nxv1i64_nxv1i32_nxv1i32( @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -554,6 +578,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32( + , , , i32); @@ -567,6 +592,7 @@ define @intrinsic_vwmulu_vv_nxv2i64_nxv2i32_nxv2i32( @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -600,6 +626,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32( + , , , i32); @@ -613,6 +640,7 @@ define @intrinsic_vwmulu_vv_nxv4i64_nxv4i32_nxv4i32( @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -646,6 +674,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32( + , , , i32); @@ -659,6 +688,7 @@ define @intrinsic_vwmulu_vv_nxv8i64_nxv8i32_nxv8i32( @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -692,6 +722,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8( + , , i8, i32); @@ -705,6 +736,7 @@ define @intrinsic_vwmulu_vx_nxv1i16_nxv1i8_i8( @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -738,6 +770,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8( + , , i8, i32); @@ -751,6 +784,7 @@ define @intrinsic_vwmulu_vx_nxv2i16_nxv2i8_i8( @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -784,6 +818,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8( + , , i8, i32); @@ -797,6 +832,7 @@ define @intrinsic_vwmulu_vx_nxv4i16_nxv4i8_i8( @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -830,6 +866,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8( + , , i8, i32); @@ -843,6 +880,7 @@ define @intrinsic_vwmulu_vx_nxv8i16_nxv8i8_i8( @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -876,6 +914,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8( + , , i8, i32); @@ -889,6 +928,7 @@ define @intrinsic_vwmulu_vx_nxv16i16_nxv16i8_i8( @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -922,6 +962,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8( + , , i8, i32); @@ -935,6 +976,7 @@ define @intrinsic_vwmulu_vx_nxv32i16_nxv32i8_i8( @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -968,6 +1010,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16( + , , i16, i32); @@ -981,6 +1024,7 @@ define @intrinsic_vwmulu_vx_nxv1i32_nxv1i16_i16( @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1014,6 +1058,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16( + , , i16, i32); @@ -1027,6 +1072,7 @@ define @intrinsic_vwmulu_vx_nxv2i32_nxv2i16_i16( @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1060,6 +1106,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16( + , , i16, i32); @@ -1073,6 +1120,7 @@ define @intrinsic_vwmulu_vx_nxv4i32_nxv4i16_i16( @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1106,6 +1154,7 @@ entry: } declare 
@llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16( + , , i16, i32); @@ -1119,6 +1168,7 @@ define @intrinsic_vwmulu_vx_nxv8i32_nxv8i16_i16( @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1152,6 +1202,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16( + , , i16, i32); @@ -1165,6 +1216,7 @@ define @intrinsic_vwmulu_vx_nxv16i32_nxv16i16_i16( @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1198,6 +1250,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32( + , , i32, i32); @@ -1211,6 +1264,7 @@ define @intrinsic_vwmulu_vx_nxv1i64_nxv1i32_i32( @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1244,6 +1298,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32( + , , i32, i32); @@ -1257,6 +1312,7 @@ define @intrinsic_vwmulu_vx_nxv2i64_nxv2i32_i32( @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1290,6 +1346,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32( + , , i32, i32); @@ -1303,6 +1360,7 @@ define @intrinsic_vwmulu_vx_nxv4i64_nxv4i32_i32( @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1336,6 +1394,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32( + , , i32, i32); @@ -1349,6 +1408,7 @@ define @intrinsic_vwmulu_vx_nxv8i64_nxv8i32_i32( @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll index e9adaec..ab57eb0e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8( + , , , i64); @@ -15,6 +16,7 @@ define @intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8( @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -48,6 +50,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8( + , , , i64); @@ -61,6 +64,7 @@ define @intrinsic_vwmulu_vv_nxv2i16_nxv2i8_nxv2i8( @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -94,6 +98,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8( + , , , i64); @@ -107,6 +112,7 @@ define @intrinsic_vwmulu_vv_nxv4i16_nxv4i8_nxv4i8( @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -140,6 +146,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8( + , , , i64); @@ -153,6 +160,7 @@ define @intrinsic_vwmulu_vv_nxv8i16_nxv8i8_nxv8i8( @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -186,6 +194,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8( + , , , i64); @@ -199,6 +208,7 @@ define @intrinsic_vwmulu_vv_nxv16i16_nxv16i8_nxv16i8( @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -232,6 +242,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8( + , , , i64); @@ -245,6 +256,7 @@ define @intrinsic_vwmulu_vv_nxv32i16_nxv32i8_nxv32i8( @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -278,6 +290,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16( + , , , i64); @@ -291,6 +304,7 @@ define @intrinsic_vwmulu_vv_nxv1i32_nxv1i16_nxv1i16( @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -324,6 +338,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16( + , , , i64); @@ -337,6 +352,7 @@ define @intrinsic_vwmulu_vv_nxv2i32_nxv2i16_nxv2i16( @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16( + undef, %0, %1, i64 
%2) @@ -370,6 +386,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16( + , , , i64); @@ -383,6 +400,7 @@ define @intrinsic_vwmulu_vv_nxv4i32_nxv4i16_nxv4i16( @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -416,6 +434,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16( + , , , i64); @@ -429,6 +448,7 @@ define @intrinsic_vwmulu_vv_nxv8i32_nxv8i16_nxv8i16( @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -462,6 +482,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16( + , , , i64); @@ -475,6 +496,7 @@ define @intrinsic_vwmulu_vv_nxv16i32_nxv16i16_nxv16i16( @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -508,6 +530,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32( + , , , i64); @@ -521,6 +544,7 @@ define @intrinsic_vwmulu_vv_nxv1i64_nxv1i32_nxv1i32( @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -554,6 +578,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32( + , , , i64); @@ -567,6 +592,7 @@ define @intrinsic_vwmulu_vv_nxv2i64_nxv2i32_nxv2i32( @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -600,6 +626,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32( + , , , i64); @@ -613,6 +640,7 @@ define @intrinsic_vwmulu_vv_nxv4i64_nxv4i32_nxv4i32( @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -646,6 +674,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32( + , , , i64); @@ -659,6 +688,7 @@ define @intrinsic_vwmulu_vv_nxv8i64_nxv8i32_nxv8i32( @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -692,6 +722,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8( + , , i8, i64); @@ -705,6 +736,7 @@ define @intrinsic_vwmulu_vx_nxv1i16_nxv1i8_i8( @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -738,6 +770,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8( + , , i8, i64); @@ -751,6 +784,7 @@ define @intrinsic_vwmulu_vx_nxv2i16_nxv2i8_i8( @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -784,6 +818,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8( + , , i8, i64); @@ -797,6 +832,7 @@ define @intrinsic_vwmulu_vx_nxv4i16_nxv4i8_i8( @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -830,6 +866,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8( + , , i8, i64); @@ -843,6 +880,7 @@ define @intrinsic_vwmulu_vx_nxv8i16_nxv8i8_i8( @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -876,6 +914,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8( + , , i8, i64); @@ -889,6 +928,7 @@ define @intrinsic_vwmulu_vx_nxv16i16_nxv16i8_i8( @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -922,6 +962,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8( + , , i8, i64); @@ -935,6 +976,7 @@ define @intrinsic_vwmulu_vx_nxv32i16_nxv32i8_i8( @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -968,6 +1010,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16( + , , i16, i64); @@ -981,6 +1024,7 @@ define @intrinsic_vwmulu_vx_nxv1i32_nxv1i16_i16( @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1014,6 +1058,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16( + , , i16, i64); @@ -1027,6 +1072,7 @@ define @intrinsic_vwmulu_vx_nxv2i32_nxv2i16_i16( @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1060,6 +1106,7 @@ entry: } declare 
@llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16( + , , i16, i64); @@ -1073,6 +1120,7 @@ define @intrinsic_vwmulu_vx_nxv4i32_nxv4i16_i16( @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1106,6 +1154,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16( + , , i16, i64); @@ -1119,6 +1168,7 @@ define @intrinsic_vwmulu_vx_nxv8i32_nxv8i16_i16( @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1152,6 +1202,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16( + , , i16, i64); @@ -1165,6 +1216,7 @@ define @intrinsic_vwmulu_vx_nxv16i32_nxv16i16_i16( @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1198,6 +1250,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32( + , , i32, i64); @@ -1211,6 +1264,7 @@ define @intrinsic_vwmulu_vx_nxv1i64_nxv1i32_i32( @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1244,6 +1298,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32( + , , i32, i64); @@ -1257,6 +1312,7 @@ define @intrinsic_vwmulu_vx_nxv2i64_nxv2i32_i32( @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1290,6 +1346,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32( + , , i32, i64); @@ -1303,6 +1360,7 @@ define @intrinsic_vwmulu_vx_nxv4i64_nxv4i32_i32( @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1336,6 +1394,7 @@ entry: } declare @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32( + , , i32, i64); @@ -1349,6 +1408,7 @@ define @intrinsic_vwmulu_vx_nxv8i64_nxv8i32_i32( @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub-rv32.ll index ef271841..a37b6c0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwsub-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsub-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8( + , , , i32); @@ -15,6 +16,7 @@ define @intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8( @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -48,6 +50,7 @@ entry: } declare @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8( + , , , i32); @@ -61,6 +64,7 @@ define @intrinsic_vwsub_vv_nxv2i16_nxv2i8_nxv2i8( @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -94,6 +98,7 @@ entry: } declare @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8( + , , , i32); @@ -107,6 +112,7 @@ define @intrinsic_vwsub_vv_nxv4i16_nxv4i8_nxv4i8( @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -140,6 +146,7 @@ entry: } declare @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8( + , , , i32); @@ -153,6 +160,7 @@ define @intrinsic_vwsub_vv_nxv8i16_nxv8i8_nxv8i8( @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -186,6 +194,7 @@ entry: } declare @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8( + , , , i32); @@ -199,6 +208,7 @@ define @intrinsic_vwsub_vv_nxv16i16_nxv16i8_nxv16i8( @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -232,6 +242,7 @@ entry: } declare @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8( + , , , i32); @@ -245,6 +256,7 @@ define @intrinsic_vwsub_vv_nxv32i16_nxv32i8_nxv32i8( @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -278,6 +290,7 @@ entry: } declare @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16( + , , , i32); @@ -291,6 +304,7 @@ define @intrinsic_vwsub_vv_nxv1i32_nxv1i16_nxv1i16( @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -324,6 +338,7 @@ 
entry: } declare @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16( + , , , i32); @@ -337,6 +352,7 @@ define @intrinsic_vwsub_vv_nxv2i32_nxv2i16_nxv2i16( @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -370,6 +386,7 @@ entry: } declare @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16( + , , , i32); @@ -383,6 +400,7 @@ define @intrinsic_vwsub_vv_nxv4i32_nxv4i16_nxv4i16( @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -416,6 +434,7 @@ entry: } declare @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16( + , , , i32); @@ -429,6 +448,7 @@ define @intrinsic_vwsub_vv_nxv8i32_nxv8i16_nxv8i16( @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -462,6 +482,7 @@ entry: } declare @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16( + , , , i32); @@ -475,6 +496,7 @@ define @intrinsic_vwsub_vv_nxv16i32_nxv16i16_nxv16i16( @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -508,6 +530,7 @@ entry: } declare @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32( + , , , i32); @@ -521,6 +544,7 @@ define @intrinsic_vwsub_vv_nxv1i64_nxv1i32_nxv1i32( @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -554,6 +578,7 @@ entry: } declare @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32( + , , , i32); @@ -567,6 +592,7 @@ define @intrinsic_vwsub_vv_nxv2i64_nxv2i32_nxv2i32( @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -600,6 +626,7 @@ entry: } declare @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32( + , , , i32); @@ -613,6 +640,7 @@ define @intrinsic_vwsub_vv_nxv4i64_nxv4i32_nxv4i32( @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -646,6 +674,7 @@ entry: } declare @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32( + , , , i32); @@ -659,6 +688,7 @@ define @intrinsic_vwsub_vv_nxv8i64_nxv8i32_nxv8i32( @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -692,6 +722,7 @@ entry: } declare @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8( + , , i8, i32); @@ -705,6 +736,7 @@ define @intrinsic_vwsub_vx_nxv1i16_nxv1i8_i8( @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -738,6 +770,7 @@ entry: } declare @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8( + , , i8, i32); @@ -751,6 +784,7 @@ define @intrinsic_vwsub_vx_nxv2i16_nxv2i8_i8( @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -784,6 +818,7 @@ entry: } declare @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8( + , , i8, i32); @@ -797,6 +832,7 @@ define @intrinsic_vwsub_vx_nxv4i16_nxv4i8_i8( @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -830,6 +866,7 @@ entry: } declare @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8( + , , i8, i32); @@ -843,6 +880,7 @@ define @intrinsic_vwsub_vx_nxv8i16_nxv8i8_i8( @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -876,6 +914,7 @@ entry: } declare @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8( + , , i8, i32); @@ -889,6 +928,7 @@ define @intrinsic_vwsub_vx_nxv16i16_nxv16i8_i8( @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -922,6 +962,7 @@ entry: } declare @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8( + , , i8, i32); @@ -935,6 +976,7 @@ define @intrinsic_vwsub_vx_nxv32i16_nxv32i8_i8( @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -968,6 +1010,7 @@ entry: } declare @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16( + , , i16, i32); @@ -981,6 +1024,7 @@ define @intrinsic_vwsub_vx_nxv1i32_nxv1i16_i16( @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1014,6 +1058,7 @@ entry: } declare @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16( + , , i16, i32); @@ -1027,6 +1072,7 @@ define 
@intrinsic_vwsub_vx_nxv2i32_nxv2i16_i16( @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1060,6 +1106,7 @@ entry: } declare @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16( + , , i16, i32); @@ -1073,6 +1120,7 @@ define @intrinsic_vwsub_vx_nxv4i32_nxv4i16_i16( @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1106,6 +1154,7 @@ entry: } declare @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16( + , , i16, i32); @@ -1119,6 +1168,7 @@ define @intrinsic_vwsub_vx_nxv8i32_nxv8i16_i16( @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1152,6 +1202,7 @@ entry: } declare @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16( + , , i16, i32); @@ -1165,6 +1216,7 @@ define @intrinsic_vwsub_vx_nxv16i32_nxv16i16_i16( @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1198,6 +1250,7 @@ entry: } declare @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32( + , , i32, i32); @@ -1211,6 +1264,7 @@ define @intrinsic_vwsub_vx_nxv1i64_nxv1i32_i32( @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1244,6 +1298,7 @@ entry: } declare @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32( + , , i32, i32); @@ -1257,6 +1312,7 @@ define @intrinsic_vwsub_vx_nxv2i64_nxv2i32_i32( @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1290,6 +1346,7 @@ entry: } declare @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32( + , , i32, i32); @@ -1303,6 +1360,7 @@ define @intrinsic_vwsub_vx_nxv4i64_nxv4i32_i32( @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1336,6 +1394,7 @@ entry: } declare @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32( + , , i32, i32); @@ -1349,6 +1408,7 @@ define @intrinsic_vwsub_vx_nxv8i64_nxv8i32_i32( @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub-rv64.ll index 2de365e..bc1e468 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwsub-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsub-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8( + , , , i64); @@ -15,6 +16,7 @@ define @intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8( @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -48,6 +50,7 @@ entry: } declare @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8( + , , , i64); @@ -61,6 +64,7 @@ define @intrinsic_vwsub_vv_nxv2i16_nxv2i8_nxv2i8( @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -94,6 +98,7 @@ entry: } declare @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8( + , , , i64); @@ -107,6 +112,7 @@ define @intrinsic_vwsub_vv_nxv4i16_nxv4i8_nxv4i8( @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -140,6 +146,7 @@ entry: } declare @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8( + , , , i64); @@ -153,6 +160,7 @@ define @intrinsic_vwsub_vv_nxv8i16_nxv8i8_nxv8i8( @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -186,6 +194,7 @@ entry: } declare @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8( + , , , i64); @@ -199,6 +208,7 @@ define @intrinsic_vwsub_vv_nxv16i16_nxv16i8_nxv16i8( @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -232,6 +242,7 @@ entry: } declare @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8( + , , , i64); @@ -245,6 +256,7 @@ define @intrinsic_vwsub_vv_nxv32i16_nxv32i8_nxv32i8( @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -278,6 +290,7 @@ entry: } declare @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16( + , , , i64); @@ -291,6 +304,7 @@ define 
@intrinsic_vwsub_vv_nxv1i32_nxv1i16_nxv1i16( @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -324,6 +338,7 @@ entry: } declare @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16( + , , , i64); @@ -337,6 +352,7 @@ define @intrinsic_vwsub_vv_nxv2i32_nxv2i16_nxv2i16( @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -370,6 +386,7 @@ entry: } declare @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16( + , , , i64); @@ -383,6 +400,7 @@ define @intrinsic_vwsub_vv_nxv4i32_nxv4i16_nxv4i16( @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -416,6 +434,7 @@ entry: } declare @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16( + , , , i64); @@ -429,6 +448,7 @@ define @intrinsic_vwsub_vv_nxv8i32_nxv8i16_nxv8i16( @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -462,6 +482,7 @@ entry: } declare @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16( + , , , i64); @@ -475,6 +496,7 @@ define @intrinsic_vwsub_vv_nxv16i32_nxv16i16_nxv16i16( @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -508,6 +530,7 @@ entry: } declare @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32( + , , , i64); @@ -521,6 +544,7 @@ define @intrinsic_vwsub_vv_nxv1i64_nxv1i32_nxv1i32( @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -554,6 +578,7 @@ entry: } declare @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32( + , , , i64); @@ -567,6 +592,7 @@ define @intrinsic_vwsub_vv_nxv2i64_nxv2i32_nxv2i32( @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -600,6 +626,7 @@ entry: } declare @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32( + , , , i64); @@ -613,6 +640,7 @@ define @intrinsic_vwsub_vv_nxv4i64_nxv4i32_nxv4i32( @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -646,6 +674,7 @@ entry: } declare @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32( + , , , i64); @@ -659,6 +688,7 @@ define @intrinsic_vwsub_vv_nxv8i64_nxv8i32_nxv8i32( @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -692,6 +722,7 @@ entry: } declare @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8( + , , i8, i64); @@ -705,6 +736,7 @@ define @intrinsic_vwsub_vx_nxv1i16_nxv1i8_i8( @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -738,6 +770,7 @@ entry: } declare @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8( + , , i8, i64); @@ -751,6 +784,7 @@ define @intrinsic_vwsub_vx_nxv2i16_nxv2i8_i8( @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -784,6 +818,7 @@ entry: } declare @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8( + , , i8, i64); @@ -797,6 +832,7 @@ define @intrinsic_vwsub_vx_nxv4i16_nxv4i8_i8( @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -830,6 +866,7 @@ entry: } declare @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8( + , , i8, i64); @@ -843,6 +880,7 @@ define @intrinsic_vwsub_vx_nxv8i16_nxv8i8_i8( @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -876,6 +914,7 @@ entry: } declare @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8( + , , i8, i64); @@ -889,6 +928,7 @@ define @intrinsic_vwsub_vx_nxv16i16_nxv16i8_i8( @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -922,6 +962,7 @@ entry: } declare @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8( + , , i8, i64); @@ -935,6 +976,7 @@ define @intrinsic_vwsub_vx_nxv32i16_nxv32i8_i8( @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -968,6 +1010,7 @@ entry: } declare @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16( + , , i16, i64); @@ -981,6 +1024,7 @@ define @intrinsic_vwsub_vx_nxv1i32_nxv1i16_i16( @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16( + undef, %0, i16 %1, 
i64 %2) @@ -1014,6 +1058,7 @@ entry: } declare @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16( + , , i16, i64); @@ -1027,6 +1072,7 @@ define @intrinsic_vwsub_vx_nxv2i32_nxv2i16_i16( @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1060,6 +1106,7 @@ entry: } declare @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16( + , , i16, i64); @@ -1073,6 +1120,7 @@ define @intrinsic_vwsub_vx_nxv4i32_nxv4i16_i16( @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1106,6 +1154,7 @@ entry: } declare @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16( + , , i16, i64); @@ -1119,6 +1168,7 @@ define @intrinsic_vwsub_vx_nxv8i32_nxv8i16_i16( @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1152,6 +1202,7 @@ entry: } declare @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16( + , , i16, i64); @@ -1165,6 +1216,7 @@ define @intrinsic_vwsub_vx_nxv16i32_nxv16i16_i16( @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1198,6 +1250,7 @@ entry: } declare @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32( + , , i32, i64); @@ -1211,6 +1264,7 @@ define @intrinsic_vwsub_vx_nxv1i64_nxv1i32_i32( @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1244,6 +1298,7 @@ entry: } declare @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32( + , , i32, i64); @@ -1257,6 +1312,7 @@ define @intrinsic_vwsub_vx_nxv2i64_nxv2i32_i32( @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1290,6 +1346,7 @@ entry: } declare @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32( + , , i32, i64); @@ -1303,6 +1360,7 @@ define @intrinsic_vwsub_vx_nxv4i64_nxv4i32_i32( @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1336,6 +1394,7 @@ entry: } declare @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32( + , , i32, i64); @@ -1349,6 +1408,7 @@ define @intrinsic_vwsub_vx_nxv8i64_nxv8i32_i32( @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll index 213cb69..a57271f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll @@ -3,6 +3,7 @@ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vwsub.w.nxv1i16.nxv1i8( , + , , i32); @@ -14,6 +15,7 @@ define @intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8( @llvm.riscv.vwsub.w.nxv1i16.nxv1i8( + undef, %0, %1, i32 %2) @@ -48,6 +50,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv2i16.nxv2i8( , + , , i32); @@ -59,6 +62,7 @@ define @intrinsic_vwsub.w_wv_nxv2i16_nxv2i16_nxv2i8( @llvm.riscv.vwsub.w.nxv2i16.nxv2i8( + undef, %0, %1, i32 %2) @@ -93,6 +97,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv4i16.nxv4i8( , + , , i32); @@ -104,6 +109,7 @@ define @intrinsic_vwsub.w_wv_nxv4i16_nxv4i16_nxv4i8( @llvm.riscv.vwsub.w.nxv4i16.nxv4i8( + undef, %0, %1, i32 %2) @@ -138,6 +144,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv8i16.nxv8i8( , + , , i32); @@ -149,6 +156,7 @@ define @intrinsic_vwsub.w_wv_nxv8i16_nxv8i16_nxv8i8( @llvm.riscv.vwsub.w.nxv8i16.nxv8i8( + undef, %0, %1, i32 %2) @@ -183,6 +191,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv16i16.nxv16i8( , + , , i32); @@ -194,6 +203,7 @@ define @intrinsic_vwsub.w_wv_nxv16i16_nxv16i16_nxv16i8( @llvm.riscv.vwsub.w.nxv16i16.nxv16i8( + undef, %0, %1, i32 %2) @@ -228,6 +238,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv32i16.nxv32i8( , + , , i32); @@ -239,6 +250,7 @@ define @intrinsic_vwsub.w_wv_nxv32i16_nxv32i16_nxv32i8( @llvm.riscv.vwsub.w.nxv32i16.nxv32i8( + undef, %0, %1, i32 %2) @@ -274,6 +286,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv1i32.nxv1i16( , + , , i32); @@ -285,6 +298,7 @@ 
define @intrinsic_vwsub.w_wv_nxv1i32_nxv1i32_nxv1i16( @llvm.riscv.vwsub.w.nxv1i32.nxv1i16( + undef, %0, %1, i32 %2) @@ -319,6 +333,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv2i32.nxv2i16( , + , , i32); @@ -330,6 +345,7 @@ define @intrinsic_vwsub.w_wv_nxv2i32_nxv2i32_nxv2i16( @llvm.riscv.vwsub.w.nxv2i32.nxv2i16( + undef, %0, %1, i32 %2) @@ -364,6 +380,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv4i32.nxv4i16( , + , , i32); @@ -375,6 +392,7 @@ define @intrinsic_vwsub.w_wv_nxv4i32_nxv4i32_nxv4i16( @llvm.riscv.vwsub.w.nxv4i32.nxv4i16( + undef, %0, %1, i32 %2) @@ -409,6 +427,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv8i32.nxv8i16( , + , , i32); @@ -420,6 +439,7 @@ define @intrinsic_vwsub.w_wv_nxv8i32_nxv8i32_nxv8i16( @llvm.riscv.vwsub.w.nxv8i32.nxv8i16( + undef, %0, %1, i32 %2) @@ -454,6 +474,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv16i32.nxv16i16( , + , , i32); @@ -465,6 +486,7 @@ define @intrinsic_vwsub.w_wv_nxv16i32_nxv16i32_nxv16i16( @llvm.riscv.vwsub.w.nxv16i32.nxv16i16( + undef, %0, %1, i32 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv1i64.nxv1i32( , + , , i32); @@ -511,6 +534,7 @@ define @intrinsic_vwsub.w_wv_nxv1i64_nxv1i64_nxv1i32( @llvm.riscv.vwsub.w.nxv1i64.nxv1i32( + undef, %0, %1, i32 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv2i64.nxv2i32( , + , , i32); @@ -556,6 +581,7 @@ define @intrinsic_vwsub.w_wv_nxv2i64_nxv2i64_nxv2i32( @llvm.riscv.vwsub.w.nxv2i64.nxv2i32( + undef, %0, %1, i32 %2) @@ -590,6 +616,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv4i64.nxv4i32( , + , , i32); @@ -601,6 +628,7 @@ define @intrinsic_vwsub.w_wv_nxv4i64_nxv4i64_nxv4i32( @llvm.riscv.vwsub.w.nxv4i64.nxv4i32( + undef, %0, %1, i32 %2) @@ -635,6 +663,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv8i64.nxv8i32( , + , , i32); @@ -646,6 +675,7 @@ define @intrinsic_vwsub.w_wv_nxv8i64_nxv8i64_nxv8i32( @llvm.riscv.vwsub.w.nxv8i64.nxv8i32( + undef, %0, %1, i32 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv1i16.i8( , + , i8, i32); @@ -692,6 +723,7 @@ define @intrinsic_vwsub.w_wx_nxv1i16_nxv1i16_i8( @llvm.riscv.vwsub.w.nxv1i16.i8( + undef, %0, i8 %1, i32 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv2i16.i8( , + , i8, i32); @@ -737,6 +770,7 @@ define @intrinsic_vwsub.w_wx_nxv2i16_nxv2i16_i8( @llvm.riscv.vwsub.w.nxv2i16.i8( + undef, %0, i8 %1, i32 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv4i16.i8( , + , i8, i32); @@ -782,6 +817,7 @@ define @intrinsic_vwsub.w_wx_nxv4i16_nxv4i16_i8( @llvm.riscv.vwsub.w.nxv4i16.i8( + undef, %0, i8 %1, i32 %2) @@ -816,6 +852,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv8i16.i8( , + , i8, i32); @@ -827,6 +864,7 @@ define @intrinsic_vwsub.w_wx_nxv8i16_nxv8i16_i8( @llvm.riscv.vwsub.w.nxv8i16.i8( + undef, %0, i8 %1, i32 %2) @@ -861,6 +899,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv16i16.i8( , + , i8, i32); @@ -872,6 +911,7 @@ define @intrinsic_vwsub.w_wx_nxv16i16_nxv16i16_i8( @llvm.riscv.vwsub.w.nxv16i16.i8( + undef, %0, i8 %1, i32 %2) @@ -906,6 +946,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv32i16.i8( , + , i8, i32); @@ -917,6 +958,7 @@ define @intrinsic_vwsub.w_wx_nxv32i16_nxv32i16_i8( @llvm.riscv.vwsub.w.nxv32i16.i8( + undef, %0, i8 %1, i32 %2) @@ -951,6 +993,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv1i32.i16( , + , i16, i32); @@ -962,6 +1005,7 @@ define @intrinsic_vwsub.w_wx_nxv1i32_nxv1i32_i16( @llvm.riscv.vwsub.w.nxv1i32.i16( + undef, %0, i16 %1, i32 %2) @@ -996,6 +1040,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv2i32.i16( , + , i16, i32); @@ -1007,6 +1052,7 @@ define @intrinsic_vwsub.w_wx_nxv2i32_nxv2i32_i16( 
@llvm.riscv.vwsub.w.nxv2i32.i16( + undef, %0, i16 %1, i32 %2) @@ -1041,6 +1087,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv4i32.i16( , + , i16, i32); @@ -1052,6 +1099,7 @@ define @intrinsic_vwsub.w_wx_nxv4i32_nxv4i32_i16( @llvm.riscv.vwsub.w.nxv4i32.i16( + undef, %0, i16 %1, i32 %2) @@ -1086,6 +1134,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv8i32.i16( , + , i16, i32); @@ -1097,6 +1146,7 @@ define @intrinsic_vwsub.w_wx_nxv8i32_nxv8i32_i16( @llvm.riscv.vwsub.w.nxv8i32.i16( + undef, %0, i16 %1, i32 %2) @@ -1131,6 +1181,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv16i32.i16( , + , i16, i32); @@ -1142,6 +1193,7 @@ define @intrinsic_vwsub.w_wx_nxv16i32_nxv16i32_i16( @llvm.riscv.vwsub.w.nxv16i32.i16( + undef, %0, i16 %1, i32 %2) @@ -1176,6 +1228,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv1i64.i32( , + , i32, i32); @@ -1187,6 +1240,7 @@ define @intrinsic_vwsub.w_wx_nxv1i64_nxv1i64_i32( @llvm.riscv.vwsub.w.nxv1i64.i32( + undef, %0, i32 %1, i32 %2) @@ -1221,6 +1275,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv2i64.i32( , + , i32, i32); @@ -1232,6 +1287,7 @@ define @intrinsic_vwsub.w_wx_nxv2i64_nxv2i64_i32( @llvm.riscv.vwsub.w.nxv2i64.i32( + undef, %0, i32 %1, i32 %2) @@ -1266,6 +1322,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv4i64.i32( , + , i32, i32); @@ -1277,6 +1334,7 @@ define @intrinsic_vwsub.w_wx_nxv4i64_nxv4i64_i32( @llvm.riscv.vwsub.w.nxv4i64.i32( + undef, %0, i32 %1, i32 %2) @@ -1311,6 +1369,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv8i64.i32( , + , i32, i32); @@ -1322,6 +1381,7 @@ define @intrinsic_vwsub.w_wx_nxv8i64_nxv8i64_i32( @llvm.riscv.vwsub.w.nxv8i64.i32( + undef, %0, i32 %1, i32 %2) @@ -1873,6 +1933,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv1i16_nxv1i16_nxv1i8( @llvm.riscv.vwsub.w.nxv1i16.nxv1i8( + undef, %1, %0, i32 %2) @@ -1889,6 +1950,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv2i16_nxv2i16_nxv2i8( @llvm.riscv.vwsub.w.nxv2i16.nxv2i8( + undef, %1, %0, i32 %2) @@ -1905,6 +1967,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv4i16_nxv4i16_nxv4i8( @llvm.riscv.vwsub.w.nxv4i16.nxv4i8( + undef, %1, %0, i32 %2) @@ -1921,6 +1984,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv8i16_nxv8i16_nxv8i8( @llvm.riscv.vwsub.w.nxv8i16.nxv8i8( + undef, %1, %0, i32 %2) @@ -1937,6 +2001,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv16i16_nxv16i16_nxv16i8 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv16i16.nxv16i8( + undef, %1, %0, i32 %2) @@ -1953,6 +2018,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv32i16_nxv32i16_nxv32i8 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv32i16.nxv32i8( + undef, %1, %0, i32 %2) @@ -1969,6 +2035,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv1i32_nxv1i32_nxv1i16( @llvm.riscv.vwsub.w.nxv1i32.nxv1i16( + undef, %1, %0, i32 %2) @@ -1985,6 +2052,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv2i32_nxv2i32_nxv2i16( @llvm.riscv.vwsub.w.nxv2i32.nxv2i16( + undef, %1, %0, i32 %2) @@ -2001,6 +2069,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv4i32_nxv4i32_nxv4i16( @llvm.riscv.vwsub.w.nxv4i32.nxv4i16( + undef, %1, %0, i32 %2) @@ -2017,6 +2086,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv8i32_nxv8i32_nxv8i16( @llvm.riscv.vwsub.w.nxv8i32.nxv8i16( + undef, %1, %0, i32 %2) @@ -2033,6 +2103,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv1i64_nxv1i64_nxv1i32( @llvm.riscv.vwsub.w.nxv1i64.nxv1i32( + undef, %1, %0, i32 %2) @@ -2049,6 +2120,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv2i64_nxv2i64_nxv2i32( @llvm.riscv.vwsub.w.nxv2i64.nxv2i32( + undef, %1, %0, i32 %2) @@ -2065,6 +2137,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv4i64_nxv4i64_nxv4i32( @llvm.riscv.vwsub.w.nxv4i64.nxv4i32( + undef, %1, %0, i32 %2) @@ 
-2081,6 +2154,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv8i64_nxv8i64_nxv8i32( @llvm.riscv.vwsub.w.nxv8i64.nxv8i32( + undef, %1, %0, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll index 6e8aff5..f4abf11 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll @@ -3,6 +3,7 @@ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vwsub.w.nxv1i16.nxv1i8( , + , , i64); @@ -14,6 +15,7 @@ define @intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8( @llvm.riscv.vwsub.w.nxv1i16.nxv1i8( + undef, %0, %1, i64 %2) @@ -48,6 +50,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv2i16.nxv2i8( , + , , i64); @@ -59,6 +62,7 @@ define @intrinsic_vwsub.w_wv_nxv2i16_nxv2i16_nxv2i8( @llvm.riscv.vwsub.w.nxv2i16.nxv2i8( + undef, %0, %1, i64 %2) @@ -93,6 +97,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv4i16.nxv4i8( , + , , i64); @@ -104,6 +109,7 @@ define @intrinsic_vwsub.w_wv_nxv4i16_nxv4i16_nxv4i8( @llvm.riscv.vwsub.w.nxv4i16.nxv4i8( + undef, %0, %1, i64 %2) @@ -138,6 +144,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv8i16.nxv8i8( , + , , i64); @@ -149,6 +156,7 @@ define @intrinsic_vwsub.w_wv_nxv8i16_nxv8i16_nxv8i8( @llvm.riscv.vwsub.w.nxv8i16.nxv8i8( + undef, %0, %1, i64 %2) @@ -183,6 +191,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv16i16.nxv16i8( , + , , i64); @@ -194,6 +203,7 @@ define @intrinsic_vwsub.w_wv_nxv16i16_nxv16i16_nxv16i8( @llvm.riscv.vwsub.w.nxv16i16.nxv16i8( + undef, %0, %1, i64 %2) @@ -228,6 +238,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv32i16.nxv32i8( , + , , i64); @@ -239,6 +250,7 @@ define @intrinsic_vwsub.w_wv_nxv32i16_nxv32i16_nxv32i8( @llvm.riscv.vwsub.w.nxv32i16.nxv32i8( + undef, %0, %1, i64 %2) @@ -274,6 +286,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv1i32.nxv1i16( , + , , i64); @@ -285,6 +298,7 @@ define @intrinsic_vwsub.w_wv_nxv1i32_nxv1i32_nxv1i16( @llvm.riscv.vwsub.w.nxv1i32.nxv1i16( + undef, %0, %1, i64 %2) @@ -319,6 +333,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv2i32.nxv2i16( , + , , i64); @@ -330,6 +345,7 @@ define @intrinsic_vwsub.w_wv_nxv2i32_nxv2i32_nxv2i16( @llvm.riscv.vwsub.w.nxv2i32.nxv2i16( + undef, %0, %1, i64 %2) @@ -364,6 +380,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv4i32.nxv4i16( , + , , i64); @@ -375,6 +392,7 @@ define @intrinsic_vwsub.w_wv_nxv4i32_nxv4i32_nxv4i16( @llvm.riscv.vwsub.w.nxv4i32.nxv4i16( + undef, %0, %1, i64 %2) @@ -409,6 +427,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv8i32.nxv8i16( , + , , i64); @@ -420,6 +439,7 @@ define @intrinsic_vwsub.w_wv_nxv8i32_nxv8i32_nxv8i16( @llvm.riscv.vwsub.w.nxv8i32.nxv8i16( + undef, %0, %1, i64 %2) @@ -454,6 +474,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv16i32.nxv16i16( , + , , i64); @@ -465,6 +486,7 @@ define @intrinsic_vwsub.w_wv_nxv16i32_nxv16i32_nxv16i16( @llvm.riscv.vwsub.w.nxv16i32.nxv16i16( + undef, %0, %1, i64 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv1i64.nxv1i32( , + , , i64); @@ -511,6 +534,7 @@ define @intrinsic_vwsub.w_wv_nxv1i64_nxv1i64_nxv1i32( @llvm.riscv.vwsub.w.nxv1i64.nxv1i32( + undef, %0, %1, i64 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv2i64.nxv2i32( , + , , i64); @@ -556,6 +581,7 @@ define @intrinsic_vwsub.w_wv_nxv2i64_nxv2i64_nxv2i32( @llvm.riscv.vwsub.w.nxv2i64.nxv2i32( + undef, %0, %1, i64 %2) @@ -590,6 +616,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv4i64.nxv4i32( , + , , i64); @@ -601,6 +628,7 @@ define @intrinsic_vwsub.w_wv_nxv4i64_nxv4i64_nxv4i32( @llvm.riscv.vwsub.w.nxv4i64.nxv4i32( + undef, %0, %1, i64 %2) @@ -635,6 +663,7 @@ entry: declare 
@llvm.riscv.vwsub.w.nxv8i64.nxv8i32( , + , , i64); @@ -646,6 +675,7 @@ define @intrinsic_vwsub.w_wv_nxv8i64_nxv8i64_nxv8i32( @llvm.riscv.vwsub.w.nxv8i64.nxv8i32( + undef, %0, %1, i64 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv1i16.i8( , + , i8, i64); @@ -692,6 +723,7 @@ define @intrinsic_vwsub.w_wx_nxv1i16_nxv1i16_i8( @llvm.riscv.vwsub.w.nxv1i16.i8( + undef, %0, i8 %1, i64 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv2i16.i8( , + , i8, i64); @@ -737,6 +770,7 @@ define @intrinsic_vwsub.w_wx_nxv2i16_nxv2i16_i8( @llvm.riscv.vwsub.w.nxv2i16.i8( + undef, %0, i8 %1, i64 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv4i16.i8( , + , i8, i64); @@ -782,6 +817,7 @@ define @intrinsic_vwsub.w_wx_nxv4i16_nxv4i16_i8( @llvm.riscv.vwsub.w.nxv4i16.i8( + undef, %0, i8 %1, i64 %2) @@ -816,6 +852,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv8i16.i8( , + , i8, i64); @@ -827,6 +864,7 @@ define @intrinsic_vwsub.w_wx_nxv8i16_nxv8i16_i8( @llvm.riscv.vwsub.w.nxv8i16.i8( + undef, %0, i8 %1, i64 %2) @@ -861,6 +899,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv16i16.i8( , + , i8, i64); @@ -872,6 +911,7 @@ define @intrinsic_vwsub.w_wx_nxv16i16_nxv16i16_i8( @llvm.riscv.vwsub.w.nxv16i16.i8( + undef, %0, i8 %1, i64 %2) @@ -906,6 +946,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv32i16.i8( , + , i8, i64); @@ -917,6 +958,7 @@ define @intrinsic_vwsub.w_wx_nxv32i16_nxv32i16_i8( @llvm.riscv.vwsub.w.nxv32i16.i8( + undef, %0, i8 %1, i64 %2) @@ -951,6 +993,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv1i32.i16( , + , i16, i64); @@ -962,6 +1005,7 @@ define @intrinsic_vwsub.w_wx_nxv1i32_nxv1i32_i16( @llvm.riscv.vwsub.w.nxv1i32.i16( + undef, %0, i16 %1, i64 %2) @@ -996,6 +1040,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv2i32.i16( , + , i16, i64); @@ -1007,6 +1052,7 @@ define @intrinsic_vwsub.w_wx_nxv2i32_nxv2i32_i16( @llvm.riscv.vwsub.w.nxv2i32.i16( + undef, %0, i16 %1, i64 %2) @@ -1041,6 +1087,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv4i32.i16( , + , i16, i64); @@ -1052,6 +1099,7 @@ define @intrinsic_vwsub.w_wx_nxv4i32_nxv4i32_i16( @llvm.riscv.vwsub.w.nxv4i32.i16( + undef, %0, i16 %1, i64 %2) @@ -1086,6 +1134,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv8i32.i16( , + , i16, i64); @@ -1097,6 +1146,7 @@ define @intrinsic_vwsub.w_wx_nxv8i32_nxv8i32_i16( @llvm.riscv.vwsub.w.nxv8i32.i16( + undef, %0, i16 %1, i64 %2) @@ -1131,6 +1181,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv16i32.i16( , + , i16, i64); @@ -1142,6 +1193,7 @@ define @intrinsic_vwsub.w_wx_nxv16i32_nxv16i32_i16( @llvm.riscv.vwsub.w.nxv16i32.i16( + undef, %0, i16 %1, i64 %2) @@ -1176,6 +1228,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv1i64.i32( , + , i32, i64); @@ -1187,6 +1240,7 @@ define @intrinsic_vwsub.w_wx_nxv1i64_nxv1i64_i32( @llvm.riscv.vwsub.w.nxv1i64.i32( + undef, %0, i32 %1, i64 %2) @@ -1221,6 +1275,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv2i64.i32( , + , i32, i64); @@ -1232,6 +1287,7 @@ define @intrinsic_vwsub.w_wx_nxv2i64_nxv2i64_i32( @llvm.riscv.vwsub.w.nxv2i64.i32( + undef, %0, i32 %1, i64 %2) @@ -1266,6 +1322,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv4i64.i32( , + , i32, i64); @@ -1277,6 +1334,7 @@ define @intrinsic_vwsub.w_wx_nxv4i64_nxv4i64_i32( @llvm.riscv.vwsub.w.nxv4i64.i32( + undef, %0, i32 %1, i64 %2) @@ -1311,6 +1369,7 @@ entry: declare @llvm.riscv.vwsub.w.nxv8i64.i32( , + , i32, i64); @@ -1322,6 +1381,7 @@ define @intrinsic_vwsub.w_wx_nxv8i64_nxv8i64_i32( @llvm.riscv.vwsub.w.nxv8i64.i32( + undef, %0, i32 %1, i64 %2) @@ -1873,6 +1933,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv1i16_nxv1i16_nxv1i8( 
@llvm.riscv.vwsub.w.nxv1i16.nxv1i8( + undef, %1, %0, i64 %2) @@ -1889,6 +1950,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv2i16_nxv2i16_nxv2i8( @llvm.riscv.vwsub.w.nxv2i16.nxv2i8( + undef, %1, %0, i64 %2) @@ -1905,6 +1967,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv4i16_nxv4i16_nxv4i8( @llvm.riscv.vwsub.w.nxv4i16.nxv4i8( + undef, %1, %0, i64 %2) @@ -1921,6 +1984,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv8i16_nxv8i16_nxv8i8( @llvm.riscv.vwsub.w.nxv8i16.nxv8i8( + undef, %1, %0, i64 %2) @@ -1937,6 +2001,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv16i16_nxv16i16_nxv16i8 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv16i16.nxv16i8( + undef, %1, %0, i64 %2) @@ -1953,6 +2018,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv32i16_nxv32i16_nxv32i8 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv32i16.nxv32i8( + undef, %1, %0, i64 %2) @@ -1969,6 +2035,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv1i32_nxv1i32_nxv1i16( @llvm.riscv.vwsub.w.nxv1i32.nxv1i16( + undef, %1, %0, i64 %2) @@ -1985,6 +2052,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv2i32_nxv2i32_nxv2i16( @llvm.riscv.vwsub.w.nxv2i32.nxv2i16( + undef, %1, %0, i64 %2) @@ -2001,6 +2069,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv4i32_nxv4i32_nxv4i16( @llvm.riscv.vwsub.w.nxv4i32.nxv4i16( + undef, %1, %0, i64 %2) @@ -2017,6 +2086,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv8i32_nxv8i32_nxv8i16( @llvm.riscv.vwsub.w.nxv8i32.nxv8i16( + undef, %1, %0, i64 %2) @@ -2033,6 +2103,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv1i64_nxv1i64_nxv1i32( @llvm.riscv.vwsub.w.nxv1i64.nxv1i32( + undef, %1, %0, i64 %2) @@ -2049,6 +2120,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv2i64_nxv2i64_nxv2i32( @llvm.riscv.vwsub.w.nxv2i64.nxv2i32( + undef, %1, %0, i64 %2) @@ -2065,6 +2137,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv4i64_nxv4i64_nxv4i32( @llvm.riscv.vwsub.w.nxv4i64.nxv4i32( + undef, %1, %0, i64 %2) @@ -2081,6 +2154,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv8i64_nxv8i64_nxv8i32( @llvm.riscv.vwsub.w.nxv8i64.nxv8i32( + undef, %1, %0, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv32.ll index 603f685..2109237 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8( + , , , i32); @@ -15,6 +16,7 @@ define @intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8( @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -48,6 +50,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8( + , , , i32); @@ -61,6 +64,7 @@ define @intrinsic_vwsubu_vv_nxv2i16_nxv2i8_nxv2i8( @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -94,6 +98,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8( + , , , i32); @@ -107,6 +112,7 @@ define @intrinsic_vwsubu_vv_nxv4i16_nxv4i8_nxv4i8( @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -140,6 +146,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8( + , , , i32); @@ -153,6 +160,7 @@ define @intrinsic_vwsubu_vv_nxv8i16_nxv8i8_nxv8i8( @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -186,6 +194,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8( + , , , i32); @@ -199,6 +208,7 @@ define @intrinsic_vwsubu_vv_nxv16i16_nxv16i8_nxv16i8( @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -232,6 +242,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8( + , , , 
i32); @@ -245,6 +256,7 @@ define @intrinsic_vwsubu_vv_nxv32i16_nxv32i8_nxv32i8( @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -278,6 +290,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16( + , , , i32); @@ -291,6 +304,7 @@ define @intrinsic_vwsubu_vv_nxv1i32_nxv1i16_nxv1i16( @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -324,6 +338,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16( + , , , i32); @@ -337,6 +352,7 @@ define @intrinsic_vwsubu_vv_nxv2i32_nxv2i16_nxv2i16( @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -370,6 +386,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16( + , , , i32); @@ -383,6 +400,7 @@ define @intrinsic_vwsubu_vv_nxv4i32_nxv4i16_nxv4i16( @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -416,6 +434,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16( + , , , i32); @@ -429,6 +448,7 @@ define @intrinsic_vwsubu_vv_nxv8i32_nxv8i16_nxv8i16( @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -462,6 +482,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16( + , , , i32); @@ -475,6 +496,7 @@ define @intrinsic_vwsubu_vv_nxv16i32_nxv16i16_nxv16i16( @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -508,6 +530,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32( + , , , i32); @@ -521,6 +544,7 @@ define @intrinsic_vwsubu_vv_nxv1i64_nxv1i32_nxv1i32( @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -554,6 +578,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32( + , , , i32); @@ -567,6 +592,7 @@ define @intrinsic_vwsubu_vv_nxv2i64_nxv2i32_nxv2i32( @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -600,6 +626,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32( + , , , i32); @@ -613,6 +640,7 @@ define @intrinsic_vwsubu_vv_nxv4i64_nxv4i32_nxv4i32( @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -646,6 +674,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32( + , , , i32); @@ -659,6 +688,7 @@ define @intrinsic_vwsubu_vv_nxv8i64_nxv8i32_nxv8i32( @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -692,6 +722,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8( + , , i8, i32); @@ -705,6 +736,7 @@ define @intrinsic_vwsubu_vx_nxv1i16_nxv1i8_i8( @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -738,6 +770,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8( + , , i8, i32); @@ -751,6 +784,7 @@ define @intrinsic_vwsubu_vx_nxv2i16_nxv2i8_i8( @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -784,6 +818,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8( + , , i8, i32); @@ -797,6 +832,7 @@ define @intrinsic_vwsubu_vx_nxv4i16_nxv4i8_i8( @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -830,6 +866,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8( + , , i8, i32); @@ -843,6 +880,7 @@ define @intrinsic_vwsubu_vx_nxv8i16_nxv8i8_i8( @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -876,6 +914,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8( + , , i8, i32); @@ -889,6 +928,7 @@ define @intrinsic_vwsubu_vx_nxv16i16_nxv16i8_i8( @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -922,6 +962,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8( + , , i8, i32); @@ -935,6 +976,7 @@ define 
@intrinsic_vwsubu_vx_nxv32i16_nxv32i8_i8( @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -968,6 +1010,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16( + , , i16, i32); @@ -981,6 +1024,7 @@ define @intrinsic_vwsubu_vx_nxv1i32_nxv1i16_i16( @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1014,6 +1058,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16( + , , i16, i32); @@ -1027,6 +1072,7 @@ define @intrinsic_vwsubu_vx_nxv2i32_nxv2i16_i16( @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1060,6 +1106,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16( + , , i16, i32); @@ -1073,6 +1120,7 @@ define @intrinsic_vwsubu_vx_nxv4i32_nxv4i16_i16( @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1106,6 +1154,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16( + , , i16, i32); @@ -1119,6 +1168,7 @@ define @intrinsic_vwsubu_vx_nxv8i32_nxv8i16_i16( @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1152,6 +1202,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16( + , , i16, i32); @@ -1165,6 +1216,7 @@ define @intrinsic_vwsubu_vx_nxv16i32_nxv16i16_i16( @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1198,6 +1250,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32( + , , i32, i32); @@ -1211,6 +1264,7 @@ define @intrinsic_vwsubu_vx_nxv1i64_nxv1i32_i32( @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1244,6 +1298,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32( + , , i32, i32); @@ -1257,6 +1312,7 @@ define @intrinsic_vwsubu_vx_nxv2i64_nxv2i32_i32( @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1290,6 +1346,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32( + , , i32, i32); @@ -1303,6 +1360,7 @@ define @intrinsic_vwsubu_vx_nxv4i64_nxv4i32_i32( @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1336,6 +1394,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32( + , , i32, i32); @@ -1349,6 +1408,7 @@ define @intrinsic_vwsubu_vx_nxv8i64_nxv8i32_i32( @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv64.ll index 3c4cc02..dac6937 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8( + , , , i64); @@ -15,6 +16,7 @@ define @intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8( @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -48,6 +50,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8( + , , , i64); @@ -61,6 +64,7 @@ define @intrinsic_vwsubu_vv_nxv2i16_nxv2i8_nxv2i8( @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -94,6 +98,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8( + , , , i64); @@ -107,6 +112,7 @@ define @intrinsic_vwsubu_vv_nxv4i16_nxv4i8_nxv4i8( @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -140,6 +146,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8( + , , , i64); @@ -153,6 +160,7 @@ define @intrinsic_vwsubu_vv_nxv8i16_nxv8i8_nxv8i8( @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -186,6 +194,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8( + , , , i64); 
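; For readability, here is the first vwsubu test of this file written out in full as it
; looks after the change. The scalable types are spelled out from the mangled intrinsic
; name (nxv1i16 is <vscale x 1 x i16>, nxv1i8 is <vscale x 1 x i8>) and the CHECK lines
; are omitted, so treat this as a sketch of the IR shape rather than a verbatim copy of
; the test.
declare <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i64);

define <vscale x 1 x i16> @intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
entry:
  ; The added leading operand is the passthru; these unmasked tests pass undef for it,
  ; and the remaining operands (the two sources and the vl) are unchanged.
  %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i64 %2)
  ret <vscale x 1 x i16> %a
}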
@@ -199,6 +208,7 @@ define @intrinsic_vwsubu_vv_nxv16i16_nxv16i8_nxv16i8( @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -232,6 +242,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8( + , , , i64); @@ -245,6 +256,7 @@ define @intrinsic_vwsubu_vv_nxv32i16_nxv32i8_nxv32i8( @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -278,6 +290,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16( + , , , i64); @@ -291,6 +304,7 @@ define @intrinsic_vwsubu_vv_nxv1i32_nxv1i16_nxv1i16( @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -324,6 +338,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16( + , , , i64); @@ -337,6 +352,7 @@ define @intrinsic_vwsubu_vv_nxv2i32_nxv2i16_nxv2i16( @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -370,6 +386,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16( + , , , i64); @@ -383,6 +400,7 @@ define @intrinsic_vwsubu_vv_nxv4i32_nxv4i16_nxv4i16( @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -416,6 +434,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16( + , , , i64); @@ -429,6 +448,7 @@ define @intrinsic_vwsubu_vv_nxv8i32_nxv8i16_nxv8i16( @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -462,6 +482,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16( + , , , i64); @@ -475,6 +496,7 @@ define @intrinsic_vwsubu_vv_nxv16i32_nxv16i16_nxv16i16( @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -508,6 +530,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32( + , , , i64); @@ -521,6 +544,7 @@ define @intrinsic_vwsubu_vv_nxv1i64_nxv1i32_nxv1i32( @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -554,6 +578,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32( + , , , i64); @@ -567,6 +592,7 @@ define @intrinsic_vwsubu_vv_nxv2i64_nxv2i32_nxv2i32( @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -600,6 +626,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32( + , , , i64); @@ -613,6 +640,7 @@ define @intrinsic_vwsubu_vv_nxv4i64_nxv4i32_nxv4i32( @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -646,6 +674,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32( + , , , i64); @@ -659,6 +688,7 @@ define @intrinsic_vwsubu_vv_nxv8i64_nxv8i32_nxv8i32( @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -692,6 +722,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8( + , , i8, i64); @@ -705,6 +736,7 @@ define @intrinsic_vwsubu_vx_nxv1i16_nxv1i8_i8( @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -738,6 +770,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8( + , , i8, i64); @@ -751,6 +784,7 @@ define @intrinsic_vwsubu_vx_nxv2i16_nxv2i8_i8( @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -784,6 +818,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8( + , , i8, i64); @@ -797,6 +832,7 @@ define @intrinsic_vwsubu_vx_nxv4i16_nxv4i8_i8( @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -830,6 +866,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8( + , , i8, i64); @@ -843,6 +880,7 @@ define @intrinsic_vwsubu_vx_nxv8i16_nxv8i8_i8( @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -876,6 +914,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8( + , , i8, i64); @@ -889,6 +928,7 @@ define 
@intrinsic_vwsubu_vx_nxv16i16_nxv16i8_i8( @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -922,6 +962,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8( + , , i8, i64); @@ -935,6 +976,7 @@ define @intrinsic_vwsubu_vx_nxv32i16_nxv32i8_i8( @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -968,6 +1010,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16( + , , i16, i64); @@ -981,6 +1024,7 @@ define @intrinsic_vwsubu_vx_nxv1i32_nxv1i16_i16( @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1014,6 +1058,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16( + , , i16, i64); @@ -1027,6 +1072,7 @@ define @intrinsic_vwsubu_vx_nxv2i32_nxv2i16_i16( @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1060,6 +1106,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16( + , , i16, i64); @@ -1073,6 +1120,7 @@ define @intrinsic_vwsubu_vx_nxv4i32_nxv4i16_i16( @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1106,6 +1154,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16( + , , i16, i64); @@ -1119,6 +1168,7 @@ define @intrinsic_vwsubu_vx_nxv8i32_nxv8i16_i16( @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1152,6 +1202,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16( + , , i16, i64); @@ -1165,6 +1216,7 @@ define @intrinsic_vwsubu_vx_nxv16i32_nxv16i16_i16( @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1198,6 +1250,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32( + , , i32, i64); @@ -1211,6 +1264,7 @@ define @intrinsic_vwsubu_vx_nxv1i64_nxv1i32_i32( @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1244,6 +1298,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32( + , , i32, i64); @@ -1257,6 +1312,7 @@ define @intrinsic_vwsubu_vx_nxv2i64_nxv2i32_i32( @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1290,6 +1346,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32( + , , i32, i64); @@ -1303,6 +1360,7 @@ define @intrinsic_vwsubu_vx_nxv4i64_nxv4i32_i32( @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1336,6 +1394,7 @@ entry: } declare @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32( + , , i32, i64); @@ -1349,6 +1408,7 @@ define @intrinsic_vwsubu_vx_nxv8i64_nxv8i32_i32( @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll index 1406f14..3c7f59d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll @@ -3,6 +3,7 @@ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8( , + , , i32); @@ -14,6 +15,7 @@ define @intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8( @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8( + undef, %0, %1, i32 %2) @@ -48,6 +50,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8( , + , , i32); @@ -59,6 +62,7 @@ define @intrinsic_vwsubu.w_wv_nxv2i16_nxv2i16_nxv2i8( @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8( + undef, %0, %1, i32 %2) @@ -93,6 +97,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8( , + , , i32); @@ -104,6 +109,7 @@ define @intrinsic_vwsubu.w_wv_nxv4i16_nxv4i16_nxv4i8( @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8( + undef, %0, %1, i32 %2) @@ -138,6 +144,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8( , + , , i32); @@ -149,6 +156,7 @@ define @intrinsic_vwsubu.w_wv_nxv8i16_nxv8i16_nxv8i8( 
@llvm.riscv.vwsubu.w.nxv8i16.nxv8i8( + undef, %0, %1, i32 %2) @@ -183,6 +191,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8( , + , , i32); @@ -194,6 +203,7 @@ define @intrinsic_vwsubu.w_wv_nxv16i16_nxv16i16_nxv16i8( @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8( + undef, %0, %1, i32 %2) @@ -228,6 +238,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8( , + , , i32); @@ -239,6 +250,7 @@ define @intrinsic_vwsubu.w_wv_nxv32i16_nxv32i16_nxv32i8( @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8( + undef, %0, %1, i32 %2) @@ -274,6 +286,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16( , + , , i32); @@ -285,6 +298,7 @@ define @intrinsic_vwsubu.w_wv_nxv1i32_nxv1i32_nxv1i16( @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16( + undef, %0, %1, i32 %2) @@ -319,6 +333,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16( , + , , i32); @@ -330,6 +345,7 @@ define @intrinsic_vwsubu.w_wv_nxv2i32_nxv2i32_nxv2i16( @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16( + undef, %0, %1, i32 %2) @@ -364,6 +380,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16( , + , , i32); @@ -375,6 +392,7 @@ define @intrinsic_vwsubu.w_wv_nxv4i32_nxv4i32_nxv4i16( @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16( + undef, %0, %1, i32 %2) @@ -409,6 +427,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16( , + , , i32); @@ -420,6 +439,7 @@ define @intrinsic_vwsubu.w_wv_nxv8i32_nxv8i32_nxv8i16( @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16( + undef, %0, %1, i32 %2) @@ -454,6 +474,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16( , + , , i32); @@ -465,6 +486,7 @@ define @intrinsic_vwsubu.w_wv_nxv16i32_nxv16i32_nxv16i16( @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16( + undef, %0, %1, i32 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32( , + , , i32); @@ -511,6 +534,7 @@ define @intrinsic_vwsubu.w_wv_nxv1i64_nxv1i64_nxv1i32( @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32( + undef, %0, %1, i32 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32( , + , , i32); @@ -556,6 +581,7 @@ define @intrinsic_vwsubu.w_wv_nxv2i64_nxv2i64_nxv2i32( @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32( + undef, %0, %1, i32 %2) @@ -590,6 +616,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32( , + , , i32); @@ -601,6 +628,7 @@ define @intrinsic_vwsubu.w_wv_nxv4i64_nxv4i64_nxv4i32( @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32( + undef, %0, %1, i32 %2) @@ -635,6 +663,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32( , + , , i32); @@ -646,6 +675,7 @@ define @intrinsic_vwsubu.w_wv_nxv8i64_nxv8i64_nxv8i32( @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32( + undef, %0, %1, i32 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv1i16.i8( , + , i8, i32); @@ -692,6 +723,7 @@ define @intrinsic_vwsubu.w_wx_nxv1i16_nxv1i16_i8( @llvm.riscv.vwsubu.w.nxv1i16.i8( + undef, %0, i8 %1, i32 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv2i16.i8( , + , i8, i32); @@ -737,6 +770,7 @@ define @intrinsic_vwsubu.w_wx_nxv2i16_nxv2i16_i8( @llvm.riscv.vwsubu.w.nxv2i16.i8( + undef, %0, i8 %1, i32 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv4i16.i8( , + , i8, i32); @@ -782,6 +817,7 @@ define @intrinsic_vwsubu.w_wx_nxv4i16_nxv4i16_i8( @llvm.riscv.vwsubu.w.nxv4i16.i8( + undef, %0, i8 %1, i32 %2) @@ -816,6 +852,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv8i16.i8( , + , i8, i32); @@ -827,6 +864,7 @@ define @intrinsic_vwsubu.w_wx_nxv8i16_nxv8i16_i8( @llvm.riscv.vwsubu.w.nxv8i16.i8( + undef, %0, i8 %1, i32 %2) @@ -861,6 +899,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv16i16.i8( , + , i8, i32); @@ -872,6 +911,7 @@ define 
@intrinsic_vwsubu.w_wx_nxv16i16_nxv16i16_i8( @llvm.riscv.vwsubu.w.nxv16i16.i8( + undef, %0, i8 %1, i32 %2) @@ -906,6 +946,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv32i16.i8( , + , i8, i32); @@ -917,6 +958,7 @@ define @intrinsic_vwsubu.w_wx_nxv32i16_nxv32i16_i8( @llvm.riscv.vwsubu.w.nxv32i16.i8( + undef, %0, i8 %1, i32 %2) @@ -951,6 +993,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv1i32.i16( , + , i16, i32); @@ -962,6 +1005,7 @@ define @intrinsic_vwsubu.w_wx_nxv1i32_nxv1i32_i16( @llvm.riscv.vwsubu.w.nxv1i32.i16( + undef, %0, i16 %1, i32 %2) @@ -996,6 +1040,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv2i32.i16( , + , i16, i32); @@ -1007,6 +1052,7 @@ define @intrinsic_vwsubu.w_wx_nxv2i32_nxv2i32_i16( @llvm.riscv.vwsubu.w.nxv2i32.i16( + undef, %0, i16 %1, i32 %2) @@ -1041,6 +1087,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv4i32.i16( , + , i16, i32); @@ -1052,6 +1099,7 @@ define @intrinsic_vwsubu.w_wx_nxv4i32_nxv4i32_i16( @llvm.riscv.vwsubu.w.nxv4i32.i16( + undef, %0, i16 %1, i32 %2) @@ -1086,6 +1134,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv8i32.i16( , + , i16, i32); @@ -1097,6 +1146,7 @@ define @intrinsic_vwsubu.w_wx_nxv8i32_nxv8i32_i16( @llvm.riscv.vwsubu.w.nxv8i32.i16( + undef, %0, i16 %1, i32 %2) @@ -1131,6 +1181,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv16i32.i16( , + , i16, i32); @@ -1142,6 +1193,7 @@ define @intrinsic_vwsubu.w_wx_nxv16i32_nxv16i32_i16( @llvm.riscv.vwsubu.w.nxv16i32.i16( + undef, %0, i16 %1, i32 %2) @@ -1176,6 +1228,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv1i64.i32( , + , i32, i32); @@ -1187,6 +1240,7 @@ define @intrinsic_vwsubu.w_wx_nxv1i64_nxv1i64_i32( @llvm.riscv.vwsubu.w.nxv1i64.i32( + undef, %0, i32 %1, i32 %2) @@ -1221,6 +1275,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv2i64.i32( , + , i32, i32); @@ -1232,6 +1287,7 @@ define @intrinsic_vwsubu.w_wx_nxv2i64_nxv2i64_i32( @llvm.riscv.vwsubu.w.nxv2i64.i32( + undef, %0, i32 %1, i32 %2) @@ -1266,6 +1322,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv4i64.i32( , + , i32, i32); @@ -1277,6 +1334,7 @@ define @intrinsic_vwsubu.w_wx_nxv4i64_nxv4i64_i32( @llvm.riscv.vwsubu.w.nxv4i64.i32( + undef, %0, i32 %1, i32 %2) @@ -1311,6 +1369,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv8i64.i32( , + , i32, i32); @@ -1322,6 +1381,7 @@ define @intrinsic_vwsubu.w_wx_nxv8i64_nxv8i64_i32( @llvm.riscv.vwsubu.w.nxv8i64.i32( + undef, %0, i32 %1, i32 %2) @@ -1873,6 +1933,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv1i16_nxv1i16_nxv1i8( @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8( + undef, %1, %0, i32 %2) @@ -1889,6 +1950,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv2i16_nxv2i16_nxv2i8( @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8( + undef, %1, %0, i32 %2) @@ -1905,6 +1967,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv4i16_nxv4i16_nxv4i8( @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8( + undef, %1, %0, i32 %2) @@ -1921,6 +1984,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv8i16_nxv8i16_nxv8i8( @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8( + undef, %1, %0, i32 %2) @@ -1937,6 +2001,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv16i16_nxv16i16_nxv16i ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8( + undef, %1, %0, i32 %2) @@ -1953,6 +2018,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv32i16_nxv32i16_nxv32i ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8( + undef, %1, %0, i32 %2) @@ -1969,6 +2035,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv1i32_nxv1i32_nxv1i16(< ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16( + undef, %1, %0, i32 %2) @@ -1985,6 +2052,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv2i32_nxv2i32_nxv2i16(< ; 
CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16( + undef, %1, %0, i32 %2) @@ -2001,6 +2069,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv4i32_nxv4i32_nxv4i16(< ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16( + undef, %1, %0, i32 %2) @@ -2017,6 +2086,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv8i32_nxv8i32_nxv8i16(< ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16( + undef, %1, %0, i32 %2) @@ -2033,6 +2103,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv1i64_nxv1i64_nxv1i32(< ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32( + undef, %1, %0, i32 %2) @@ -2049,6 +2120,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv2i64_nxv2i64_nxv2i32(< ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32( + undef, %1, %0, i32 %2) @@ -2065,6 +2137,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv4i64_nxv4i64_nxv4i32(< ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32( + undef, %1, %0, i32 %2) @@ -2081,6 +2154,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv8i64_nxv8i64_nxv8i32(< ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32( + undef, %1, %0, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll index d504b68..4b47c25 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll @@ -3,6 +3,7 @@ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8( , + , , i64); @@ -14,6 +15,7 @@ define @intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8( @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8( + undef, %0, %1, i64 %2) @@ -48,6 +50,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8( , + , , i64); @@ -59,6 +62,7 @@ define @intrinsic_vwsubu.w_wv_nxv2i16_nxv2i16_nxv2i8( @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8( + undef, %0, %1, i64 %2) @@ -93,6 +97,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8( , + , , i64); @@ -104,6 +109,7 @@ define @intrinsic_vwsubu.w_wv_nxv4i16_nxv4i16_nxv4i8( @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8( + undef, %0, %1, i64 %2) @@ -138,6 +144,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8( , + , , i64); @@ -149,6 +156,7 @@ define @intrinsic_vwsubu.w_wv_nxv8i16_nxv8i16_nxv8i8( @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8( + undef, %0, %1, i64 %2) @@ -183,6 +191,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8( , + , , i64); @@ -194,6 +203,7 @@ define @intrinsic_vwsubu.w_wv_nxv16i16_nxv16i16_nxv16i8( @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8( + undef, %0, %1, i64 %2) @@ -228,6 +238,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8( , + , , i64); @@ -239,6 +250,7 @@ define @intrinsic_vwsubu.w_wv_nxv32i16_nxv32i16_nxv32i8( @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8( + undef, %0, %1, i64 %2) @@ -274,6 +286,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16( , + , , i64); @@ -285,6 +298,7 @@ define @intrinsic_vwsubu.w_wv_nxv1i32_nxv1i32_nxv1i16( @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16( + undef, %0, %1, i64 %2) @@ -319,6 +333,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16( , + , , i64); @@ -330,6 +345,7 @@ define @intrinsic_vwsubu.w_wv_nxv2i32_nxv2i32_nxv2i16( @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16( + undef, %0, %1, i64 %2) @@ -364,6 +380,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16( , + , , i64); @@ -375,6 +392,7 @@ define @intrinsic_vwsubu.w_wv_nxv4i32_nxv4i32_nxv4i16( @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16( + undef, %0, %1, i64 %2) @@ -409,6 +427,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16( , + , , i64); @@ -420,6 
+439,7 @@ define @intrinsic_vwsubu.w_wv_nxv8i32_nxv8i32_nxv8i16( @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16( + undef, %0, %1, i64 %2) @@ -454,6 +474,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16( , + , , i64); @@ -465,6 +486,7 @@ define @intrinsic_vwsubu.w_wv_nxv16i32_nxv16i32_nxv16i16( @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16( + undef, %0, %1, i64 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32( , + , , i64); @@ -511,6 +534,7 @@ define @intrinsic_vwsubu.w_wv_nxv1i64_nxv1i64_nxv1i32( @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32( + undef, %0, %1, i64 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32( , + , , i64); @@ -556,6 +581,7 @@ define @intrinsic_vwsubu.w_wv_nxv2i64_nxv2i64_nxv2i32( @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32( + undef, %0, %1, i64 %2) @@ -590,6 +616,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32( , + , , i64); @@ -601,6 +628,7 @@ define @intrinsic_vwsubu.w_wv_nxv4i64_nxv4i64_nxv4i32( @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32( + undef, %0, %1, i64 %2) @@ -635,6 +663,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32( , + , , i64); @@ -646,6 +675,7 @@ define @intrinsic_vwsubu.w_wv_nxv8i64_nxv8i64_nxv8i32( @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32( + undef, %0, %1, i64 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv1i16.i8( , + , i8, i64); @@ -692,6 +723,7 @@ define @intrinsic_vwsubu.w_wx_nxv1i16_nxv1i16_i8( @llvm.riscv.vwsubu.w.nxv1i16.i8( + undef, %0, i8 %1, i64 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv2i16.i8( , + , i8, i64); @@ -737,6 +770,7 @@ define @intrinsic_vwsubu.w_wx_nxv2i16_nxv2i16_i8( @llvm.riscv.vwsubu.w.nxv2i16.i8( + undef, %0, i8 %1, i64 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv4i16.i8( , + , i8, i64); @@ -782,6 +817,7 @@ define @intrinsic_vwsubu.w_wx_nxv4i16_nxv4i16_i8( @llvm.riscv.vwsubu.w.nxv4i16.i8( + undef, %0, i8 %1, i64 %2) @@ -816,6 +852,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv8i16.i8( , + , i8, i64); @@ -827,6 +864,7 @@ define @intrinsic_vwsubu.w_wx_nxv8i16_nxv8i16_i8( @llvm.riscv.vwsubu.w.nxv8i16.i8( + undef, %0, i8 %1, i64 %2) @@ -861,6 +899,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv16i16.i8( , + , i8, i64); @@ -872,6 +911,7 @@ define @intrinsic_vwsubu.w_wx_nxv16i16_nxv16i16_i8( @llvm.riscv.vwsubu.w.nxv16i16.i8( + undef, %0, i8 %1, i64 %2) @@ -906,6 +946,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv32i16.i8( , + , i8, i64); @@ -917,6 +958,7 @@ define @intrinsic_vwsubu.w_wx_nxv32i16_nxv32i16_i8( @llvm.riscv.vwsubu.w.nxv32i16.i8( + undef, %0, i8 %1, i64 %2) @@ -951,6 +993,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv1i32.i16( , + , i16, i64); @@ -962,6 +1005,7 @@ define @intrinsic_vwsubu.w_wx_nxv1i32_nxv1i32_i16( @llvm.riscv.vwsubu.w.nxv1i32.i16( + undef, %0, i16 %1, i64 %2) @@ -996,6 +1040,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv2i32.i16( , + , i16, i64); @@ -1007,6 +1052,7 @@ define @intrinsic_vwsubu.w_wx_nxv2i32_nxv2i32_i16( @llvm.riscv.vwsubu.w.nxv2i32.i16( + undef, %0, i16 %1, i64 %2) @@ -1041,6 +1087,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv4i32.i16( , + , i16, i64); @@ -1052,6 +1099,7 @@ define @intrinsic_vwsubu.w_wx_nxv4i32_nxv4i32_i16( @llvm.riscv.vwsubu.w.nxv4i32.i16( + undef, %0, i16 %1, i64 %2) @@ -1086,6 +1134,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv8i32.i16( , + , i16, i64); @@ -1097,6 +1146,7 @@ define @intrinsic_vwsubu.w_wx_nxv8i32_nxv8i32_i16( @llvm.riscv.vwsubu.w.nxv8i32.i16( + undef, %0, i16 %1, i64 %2) @@ -1131,6 +1181,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv16i32.i16( , + , i16, i64); @@ -1142,6 
+1193,7 @@ define @intrinsic_vwsubu.w_wx_nxv16i32_nxv16i32_i16( @llvm.riscv.vwsubu.w.nxv16i32.i16( + undef, %0, i16 %1, i64 %2) @@ -1176,6 +1228,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv1i64.i32( , + , i32, i64); @@ -1187,6 +1240,7 @@ define @intrinsic_vwsubu.w_wx_nxv1i64_nxv1i64_i32( @llvm.riscv.vwsubu.w.nxv1i64.i32( + undef, %0, i32 %1, i64 %2) @@ -1221,6 +1275,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv2i64.i32( , + , i32, i64); @@ -1232,6 +1287,7 @@ define @intrinsic_vwsubu.w_wx_nxv2i64_nxv2i64_i32( @llvm.riscv.vwsubu.w.nxv2i64.i32( + undef, %0, i32 %1, i64 %2) @@ -1266,6 +1322,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv4i64.i32( , + , i32, i64); @@ -1277,6 +1334,7 @@ define @intrinsic_vwsubu.w_wx_nxv4i64_nxv4i64_i32( @llvm.riscv.vwsubu.w.nxv4i64.i32( + undef, %0, i32 %1, i64 %2) @@ -1311,6 +1369,7 @@ entry: declare @llvm.riscv.vwsubu.w.nxv8i64.i32( , + , i32, i64); @@ -1322,6 +1381,7 @@ define @intrinsic_vwsubu.w_wx_nxv8i64_nxv8i64_i32( @llvm.riscv.vwsubu.w.nxv8i64.i32( + undef, %0, i32 %1, i64 %2) @@ -1873,6 +1933,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv1i16_nxv1i16_nxv1i8( @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8( + undef, %1, %0, i64 %2) @@ -1889,6 +1950,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv2i16_nxv2i16_nxv2i8( @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8( + undef, %1, %0, i64 %2) @@ -1905,6 +1967,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv4i16_nxv4i16_nxv4i8( @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8( + undef, %1, %0, i64 %2) @@ -1921,6 +1984,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv8i16_nxv8i16_nxv8i8( @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8( + undef, %1, %0, i64 %2) @@ -1937,6 +2001,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv16i16_nxv16i16_nxv16i ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8( + undef, %1, %0, i64 %2) @@ -1953,6 +2018,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv32i16_nxv32i16_nxv32i ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8( + undef, %1, %0, i64 %2) @@ -1969,6 +2035,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv1i32_nxv1i32_nxv1i16(< ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16( + undef, %1, %0, i64 %2) @@ -1985,6 +2052,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv2i32_nxv2i32_nxv2i16(< ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16( + undef, %1, %0, i64 %2) @@ -2001,6 +2069,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv4i32_nxv4i32_nxv4i16(< ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16( + undef, %1, %0, i64 %2) @@ -2017,6 +2086,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv8i32_nxv8i32_nxv8i16(< ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16( + undef, %1, %0, i64 %2) @@ -2033,6 +2103,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv1i64_nxv1i64_nxv1i32(< ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32( + undef, %1, %0, i64 %2) @@ -2049,6 +2120,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv2i64_nxv2i64_nxv2i32(< ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32( + undef, %1, %0, i64 %2) @@ -2065,6 +2137,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv4i64_nxv4i64_nxv4i32(< ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32( + undef, %1, %0, i64 %2) @@ -2081,6 +2154,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv8i64_nxv8i64_nxv8i32(< ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32( + undef, %1, %0, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll index 8da7f7a..0d006ca 100644 --- 
a/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll @@ -4,6 +4,7 @@ declare @llvm.riscv.vxor.nxv1i8.nxv1i8( , , + , i32); define @intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { @@ -14,6 +15,7 @@ define @intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vxor.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -49,6 +51,7 @@ entry: declare @llvm.riscv.vxor.nxv2i8.nxv2i8( , , + , i32); define @intrinsic_vxor_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { @@ -59,6 +62,7 @@ define @intrinsic_vxor_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vxor.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -94,6 +98,7 @@ entry: declare @llvm.riscv.vxor.nxv4i8.nxv4i8( , , + , i32); define @intrinsic_vxor_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { @@ -104,6 +109,7 @@ define @intrinsic_vxor_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vxor.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -139,6 +145,7 @@ entry: declare @llvm.riscv.vxor.nxv8i8.nxv8i8( , , + , i32); define @intrinsic_vxor_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { @@ -149,6 +156,7 @@ define @intrinsic_vxor_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vxor.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -184,6 +192,7 @@ entry: declare @llvm.riscv.vxor.nxv16i8.nxv16i8( , , + , i32); define @intrinsic_vxor_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { @@ -194,6 +203,7 @@ define @intrinsic_vxor_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vxor.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -229,6 +239,7 @@ entry: declare @llvm.riscv.vxor.nxv32i8.nxv32i8( , , + , i32); define @intrinsic_vxor_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { @@ -239,6 +250,7 @@ define @intrinsic_vxor_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vxor.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -274,6 +286,7 @@ entry: declare @llvm.riscv.vxor.nxv64i8.nxv64i8( , , + , i32); define @intrinsic_vxor_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { @@ -284,6 +297,7 @@ define @intrinsic_vxor_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vxor.nxv64i8.nxv64i8( + undef, %0, %1, i32 %2) @@ -320,6 +334,7 @@ entry: declare @llvm.riscv.vxor.nxv1i16.nxv1i16( , , + , i32); define @intrinsic_vxor_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { @@ -330,6 +345,7 @@ define @intrinsic_vxor_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vxor.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -365,6 +381,7 @@ entry: declare @llvm.riscv.vxor.nxv2i16.nxv2i16( , , + , i32); define @intrinsic_vxor_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { @@ -375,6 +392,7 @@ define @intrinsic_vxor_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vxor.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -410,6 +428,7 @@ entry: declare @llvm.riscv.vxor.nxv4i16.nxv4i16( , , + , i32); define @intrinsic_vxor_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { @@ -420,6 +439,7 @@ define @intrinsic_vxor_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vxor.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -455,6 +475,7 @@ entry: declare @llvm.riscv.vxor.nxv8i16.nxv8i16( , , + , i32); define @intrinsic_vxor_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { @@ -465,6 +486,7 @@ define @intrinsic_vxor_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vxor.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vxor.nxv16i16.nxv16i16( , , + , i32); define @intrinsic_vxor_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { @@ -510,6 +533,7 @@ define @intrinsic_vxor_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vxor.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -545,6 +569,7 @@ 
entry: declare @llvm.riscv.vxor.nxv32i16.nxv32i16( , , + , i32); define @intrinsic_vxor_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { @@ -555,6 +580,7 @@ define @intrinsic_vxor_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vxor.nxv32i16.nxv32i16( + undef, %0, %1, i32 %2) @@ -591,6 +617,7 @@ entry: declare @llvm.riscv.vxor.nxv1i32.nxv1i32( , , + , i32); define @intrinsic_vxor_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { @@ -601,6 +628,7 @@ define @intrinsic_vxor_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vxor.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -636,6 +664,7 @@ entry: declare @llvm.riscv.vxor.nxv2i32.nxv2i32( , , + , i32); define @intrinsic_vxor_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { @@ -646,6 +675,7 @@ define @intrinsic_vxor_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vxor.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vxor.nxv4i32.nxv4i32( , , + , i32); define @intrinsic_vxor_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { @@ -691,6 +722,7 @@ define @intrinsic_vxor_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vxor.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vxor.nxv8i32.nxv8i32( , , + , i32); define @intrinsic_vxor_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { @@ -736,6 +769,7 @@ define @intrinsic_vxor_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vxor.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vxor.nxv16i32.nxv16i32( , , + , i32); define @intrinsic_vxor_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { @@ -781,6 +816,7 @@ define @intrinsic_vxor_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vxor.nxv16i32.nxv16i32( + undef, %0, %1, i32 %2) @@ -817,6 +853,7 @@ entry: declare @llvm.riscv.vxor.nxv1i64.nxv1i64( , , + , i32); define @intrinsic_vxor_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { @@ -827,6 +864,7 @@ define @intrinsic_vxor_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vxor.nxv1i64.nxv1i64( + undef, %0, %1, i32 %2) @@ -862,6 +900,7 @@ entry: declare @llvm.riscv.vxor.nxv2i64.nxv2i64( , , + , i32); define @intrinsic_vxor_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { @@ -872,6 +911,7 @@ define @intrinsic_vxor_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vxor.nxv2i64.nxv2i64( + undef, %0, %1, i32 %2) @@ -907,6 +947,7 @@ entry: declare @llvm.riscv.vxor.nxv4i64.nxv4i64( , , + , i32); define @intrinsic_vxor_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { @@ -917,6 +958,7 @@ define @intrinsic_vxor_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vxor.nxv4i64.nxv4i64( + undef, %0, %1, i32 %2) @@ -952,6 +994,7 @@ entry: declare @llvm.riscv.vxor.nxv8i64.nxv8i64( , , + , i32); define @intrinsic_vxor_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { @@ -962,6 +1005,7 @@ define @intrinsic_vxor_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vxor.nxv8i64.nxv8i64( + undef, %0, %1, i32 %2) @@ -997,6 +1041,7 @@ entry: declare @llvm.riscv.vxor.nxv1i8.i8( , + , i8, i32); @@ -1008,6 +1053,7 @@ define @intrinsic_vxor_vx_nxv1i8_nxv1i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vxor.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1042,6 +1088,7 @@ entry: declare @llvm.riscv.vxor.nxv2i8.i8( , + , i8, i32); @@ -1053,6 +1100,7 @@ define @intrinsic_vxor_vx_nxv2i8_nxv2i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vxor.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1087,6 +1135,7 @@ entry: declare @llvm.riscv.vxor.nxv4i8.i8( , + , i8, i32); @@ -1098,6 +1147,7 @@ define @intrinsic_vxor_vx_nxv4i8_nxv4i8_i8( % ; CHECK-NEXT: 
ret entry: %a = call @llvm.riscv.vxor.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1132,6 +1182,7 @@ entry: declare @llvm.riscv.vxor.nxv8i8.i8( , + , i8, i32); @@ -1143,6 +1194,7 @@ define @intrinsic_vxor_vx_nxv8i8_nxv8i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vxor.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1177,6 +1229,7 @@ entry: declare @llvm.riscv.vxor.nxv16i8.i8( , + , i8, i32); @@ -1188,6 +1241,7 @@ define @intrinsic_vxor_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vxor.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1222,6 +1276,7 @@ entry: declare @llvm.riscv.vxor.nxv32i8.i8( , + , i8, i32); @@ -1233,6 +1288,7 @@ define @intrinsic_vxor_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vxor.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1267,6 +1323,7 @@ entry: declare @llvm.riscv.vxor.nxv64i8.i8( , + , i8, i32); @@ -1278,6 +1335,7 @@ define @intrinsic_vxor_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vxor.nxv64i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1312,6 +1370,7 @@ entry: declare @llvm.riscv.vxor.nxv1i16.i16( , + , i16, i32); @@ -1323,6 +1382,7 @@ define @intrinsic_vxor_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vxor.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1357,6 +1417,7 @@ entry: declare @llvm.riscv.vxor.nxv2i16.i16( , + , i16, i32); @@ -1368,6 +1429,7 @@ define @intrinsic_vxor_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vxor.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1402,6 +1464,7 @@ entry: declare @llvm.riscv.vxor.nxv4i16.i16( , + , i16, i32); @@ -1413,6 +1476,7 @@ define @intrinsic_vxor_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vxor.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1447,6 +1511,7 @@ entry: declare @llvm.riscv.vxor.nxv8i16.i16( , + , i16, i32); @@ -1458,6 +1523,7 @@ define @intrinsic_vxor_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vxor.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1492,6 +1558,7 @@ entry: declare @llvm.riscv.vxor.nxv16i16.i16( , + , i16, i32); @@ -1503,6 +1570,7 @@ define @intrinsic_vxor_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vxor.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1537,6 +1605,7 @@ entry: declare @llvm.riscv.vxor.nxv32i16.i16( , + , i16, i32); @@ -1548,6 +1617,7 @@ define @intrinsic_vxor_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vxor.nxv32i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1582,6 +1652,7 @@ entry: declare @llvm.riscv.vxor.nxv1i32.i32( , + , i32, i32); @@ -1593,6 +1664,7 @@ define @intrinsic_vxor_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vxor.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1627,6 +1699,7 @@ entry: declare @llvm.riscv.vxor.nxv2i32.i32( , + , i32, i32); @@ -1638,6 +1711,7 @@ define @intrinsic_vxor_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vxor.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1672,6 +1746,7 @@ entry: declare @llvm.riscv.vxor.nxv4i32.i32( , + , i32, i32); @@ -1683,6 +1758,7 @@ define @intrinsic_vxor_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vxor.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1717,6 +1793,7 @@ entry: declare @llvm.riscv.vxor.nxv8i32.i32( , + , i32, i32); @@ -1728,6 +1805,7 @@ define @intrinsic_vxor_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vxor.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1762,6 +1840,7 @@ entry: declare @llvm.riscv.vxor.nxv16i32.i32( , + , i32, i32); @@ -1773,6 +1852,7 @@ define @intrinsic_vxor_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vxor.nxv16i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1807,6 +1887,7 @@ entry: declare @llvm.riscv.vxor.nxv1i64.i64( , + , i64, i32); @@ -1824,6 +1905,7 @@ define @intrinsic_vxor_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vxor.nxv1i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1864,6 +1946,7 @@ entry: declare 
@llvm.riscv.vxor.nxv2i64.i64( , + , i64, i32); @@ -1881,6 +1964,7 @@ define @intrinsic_vxor_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vxor.nxv2i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1921,6 +2005,7 @@ entry: declare @llvm.riscv.vxor.nxv4i64.i64( , + , i64, i32); @@ -1938,6 +2023,7 @@ define @intrinsic_vxor_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vxor.nxv4i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1978,6 +2064,7 @@ entry: declare @llvm.riscv.vxor.nxv8i64.i64( , + , i64, i32); @@ -1995,6 +2082,7 @@ define @intrinsic_vxor_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vxor.nxv8i64.i64( + undef, %0, i64 %1, i32 %2) @@ -2041,6 +2129,7 @@ define @intrinsic_vxor_vi_nxv1i8_nxv1i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vxor.nxv1i8.i8( + undef, %0, i8 9, i32 %1) @@ -2073,6 +2162,7 @@ define @intrinsic_vxor_vi_nxv2i8_nxv2i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vxor.nxv2i8.i8( + undef, %0, i8 9, i32 %1) @@ -2105,6 +2195,7 @@ define @intrinsic_vxor_vi_nxv4i8_nxv4i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vxor.nxv4i8.i8( + undef, %0, i8 9, i32 %1) @@ -2137,6 +2228,7 @@ define @intrinsic_vxor_vi_nxv8i8_nxv8i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vxor.nxv8i8.i8( + undef, %0, i8 9, i32 %1) @@ -2169,6 +2261,7 @@ define @intrinsic_vxor_vi_nxv16i8_nxv16i8_i8( @llvm.riscv.vxor.nxv16i8.i8( + undef, %0, i8 9, i32 %1) @@ -2201,6 +2294,7 @@ define @intrinsic_vxor_vi_nxv32i8_nxv32i8_i8( @llvm.riscv.vxor.nxv32i8.i8( + undef, %0, i8 9, i32 %1) @@ -2233,6 +2327,7 @@ define @intrinsic_vxor_vi_nxv64i8_nxv64i8_i8( @llvm.riscv.vxor.nxv64i8.i8( + undef, %0, i8 9, i32 %1) @@ -2265,6 +2360,7 @@ define @intrinsic_vxor_vi_nxv1i16_nxv1i16_i16( @llvm.riscv.vxor.nxv1i16.i16( + undef, %0, i16 9, i32 %1) @@ -2297,6 +2393,7 @@ define @intrinsic_vxor_vi_nxv2i16_nxv2i16_i16( @llvm.riscv.vxor.nxv2i16.i16( + undef, %0, i16 9, i32 %1) @@ -2329,6 +2426,7 @@ define @intrinsic_vxor_vi_nxv4i16_nxv4i16_i16( @llvm.riscv.vxor.nxv4i16.i16( + undef, %0, i16 9, i32 %1) @@ -2361,6 +2459,7 @@ define @intrinsic_vxor_vi_nxv8i16_nxv8i16_i16( @llvm.riscv.vxor.nxv8i16.i16( + undef, %0, i16 9, i32 %1) @@ -2393,6 +2492,7 @@ define @intrinsic_vxor_vi_nxv16i16_nxv16i16_i16( @llvm.riscv.vxor.nxv16i16.i16( + undef, %0, i16 9, i32 %1) @@ -2425,6 +2525,7 @@ define @intrinsic_vxor_vi_nxv32i16_nxv32i16_i16( @llvm.riscv.vxor.nxv32i16.i16( + undef, %0, i16 9, i32 %1) @@ -2457,6 +2558,7 @@ define @intrinsic_vxor_vi_nxv1i32_nxv1i32_i32( @llvm.riscv.vxor.nxv1i32.i32( + undef, %0, i32 9, i32 %1) @@ -2489,6 +2591,7 @@ define @intrinsic_vxor_vi_nxv2i32_nxv2i32_i32( @llvm.riscv.vxor.nxv2i32.i32( + undef, %0, i32 9, i32 %1) @@ -2521,6 +2624,7 @@ define @intrinsic_vxor_vi_nxv4i32_nxv4i32_i32( @llvm.riscv.vxor.nxv4i32.i32( + undef, %0, i32 9, i32 %1) @@ -2553,6 +2657,7 @@ define @intrinsic_vxor_vi_nxv8i32_nxv8i32_i32( @llvm.riscv.vxor.nxv8i32.i32( + undef, %0, i32 9, i32 %1) @@ -2585,6 +2690,7 @@ define @intrinsic_vxor_vi_nxv16i32_nxv16i32_i32( @llvm.riscv.vxor.nxv16i32.i32( + undef, %0, i32 9, i32 %1) @@ -2617,6 +2723,7 @@ define @intrinsic_vxor_vi_nxv1i64_nxv1i64_i64( @llvm.riscv.vxor.nxv1i64.i64( + undef, %0, i64 9, i32 %1) @@ -2649,6 +2756,7 @@ define @intrinsic_vxor_vi_nxv2i64_nxv2i64_i64( @llvm.riscv.vxor.nxv2i64.i64( + undef, %0, i64 9, i32 %1) @@ -2681,6 +2789,7 @@ define @intrinsic_vxor_vi_nxv4i64_nxv4i64_i64( @llvm.riscv.vxor.nxv4i64.i64( + undef, %0, i64 9, i32 %1) @@ -2713,6 +2822,7 @@ define @intrinsic_vxor_vi_nxv8i64_nxv8i64_i64( @llvm.riscv.vxor.nxv8i64.i64( + undef, %0, i64 9, i32 %1) diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll index 08c47cd..45e21d1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll @@ -4,6 +4,7 @@ declare @llvm.riscv.vxor.nxv1i8.nxv1i8( , , + , i64); define @intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { @@ -14,6 +15,7 @@ define @intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vxor.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -49,6 +51,7 @@ entry: declare @llvm.riscv.vxor.nxv2i8.nxv2i8( , , + , i64); define @intrinsic_vxor_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { @@ -59,6 +62,7 @@ define @intrinsic_vxor_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vxor.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -94,6 +98,7 @@ entry: declare @llvm.riscv.vxor.nxv4i8.nxv4i8( , , + , i64); define @intrinsic_vxor_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { @@ -104,6 +109,7 @@ define @intrinsic_vxor_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vxor.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -139,6 +145,7 @@ entry: declare @llvm.riscv.vxor.nxv8i8.nxv8i8( , , + , i64); define @intrinsic_vxor_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { @@ -149,6 +156,7 @@ define @intrinsic_vxor_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vxor.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -184,6 +192,7 @@ entry: declare @llvm.riscv.vxor.nxv16i8.nxv16i8( , , + , i64); define @intrinsic_vxor_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { @@ -194,6 +203,7 @@ define @intrinsic_vxor_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vxor.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -229,6 +239,7 @@ entry: declare @llvm.riscv.vxor.nxv32i8.nxv32i8( , , + , i64); define @intrinsic_vxor_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { @@ -239,6 +250,7 @@ define @intrinsic_vxor_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vxor.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -274,6 +286,7 @@ entry: declare @llvm.riscv.vxor.nxv64i8.nxv64i8( , , + , i64); define @intrinsic_vxor_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { @@ -284,6 +297,7 @@ define @intrinsic_vxor_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vxor.nxv64i8.nxv64i8( + undef, %0, %1, i64 %2) @@ -320,6 +334,7 @@ entry: declare @llvm.riscv.vxor.nxv1i16.nxv1i16( , , + , i64); define @intrinsic_vxor_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { @@ -330,6 +345,7 @@ define @intrinsic_vxor_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vxor.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -365,6 +381,7 @@ entry: declare @llvm.riscv.vxor.nxv2i16.nxv2i16( , , + , i64); define @intrinsic_vxor_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { @@ -375,6 +392,7 @@ define @intrinsic_vxor_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vxor.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -410,6 +428,7 @@ entry: declare @llvm.riscv.vxor.nxv4i16.nxv4i16( , , + , i64); define @intrinsic_vxor_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { @@ -420,6 +439,7 @@ define @intrinsic_vxor_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vxor.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -455,6 +475,7 @@ entry: declare @llvm.riscv.vxor.nxv8i16.nxv8i16( , , + , i64); define @intrinsic_vxor_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { @@ -465,6 +486,7 @@ define @intrinsic_vxor_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vxor.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -500,6 +522,7 @@ entry: declare @llvm.riscv.vxor.nxv16i16.nxv16i16( , , + , i64); define @intrinsic_vxor_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { @@ -510,6 +533,7 @@ define 
@intrinsic_vxor_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vxor.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -545,6 +569,7 @@ entry: declare @llvm.riscv.vxor.nxv32i16.nxv32i16( , , + , i64); define @intrinsic_vxor_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { @@ -555,6 +580,7 @@ define @intrinsic_vxor_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vxor.nxv32i16.nxv32i16( + undef, %0, %1, i64 %2) @@ -591,6 +617,7 @@ entry: declare @llvm.riscv.vxor.nxv1i32.nxv1i32( , , + , i64); define @intrinsic_vxor_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { @@ -601,6 +628,7 @@ define @intrinsic_vxor_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vxor.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -636,6 +664,7 @@ entry: declare @llvm.riscv.vxor.nxv2i32.nxv2i32( , , + , i64); define @intrinsic_vxor_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { @@ -646,6 +675,7 @@ define @intrinsic_vxor_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vxor.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -681,6 +711,7 @@ entry: declare @llvm.riscv.vxor.nxv4i32.nxv4i32( , , + , i64); define @intrinsic_vxor_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { @@ -691,6 +722,7 @@ define @intrinsic_vxor_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vxor.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -726,6 +758,7 @@ entry: declare @llvm.riscv.vxor.nxv8i32.nxv8i32( , , + , i64); define @intrinsic_vxor_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { @@ -736,6 +769,7 @@ define @intrinsic_vxor_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vxor.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -771,6 +805,7 @@ entry: declare @llvm.riscv.vxor.nxv16i32.nxv16i32( , , + , i64); define @intrinsic_vxor_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { @@ -781,6 +816,7 @@ define @intrinsic_vxor_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vxor.nxv16i32.nxv16i32( + undef, %0, %1, i64 %2) @@ -817,6 +853,7 @@ entry: declare @llvm.riscv.vxor.nxv1i64.nxv1i64( , , + , i64); define @intrinsic_vxor_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { @@ -827,6 +864,7 @@ define @intrinsic_vxor_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vxor.nxv1i64.nxv1i64( + undef, %0, %1, i64 %2) @@ -862,6 +900,7 @@ entry: declare @llvm.riscv.vxor.nxv2i64.nxv2i64( , , + , i64); define @intrinsic_vxor_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { @@ -872,6 +911,7 @@ define @intrinsic_vxor_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vxor.nxv2i64.nxv2i64( + undef, %0, %1, i64 %2) @@ -907,6 +947,7 @@ entry: declare @llvm.riscv.vxor.nxv4i64.nxv4i64( , , + , i64); define @intrinsic_vxor_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { @@ -917,6 +958,7 @@ define @intrinsic_vxor_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vxor.nxv4i64.nxv4i64( + undef, %0, %1, i64 %2) @@ -952,6 +994,7 @@ entry: declare @llvm.riscv.vxor.nxv8i64.nxv8i64( , , + , i64); define @intrinsic_vxor_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { @@ -962,6 +1005,7 @@ define @intrinsic_vxor_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vxor.nxv8i64.nxv8i64( + undef, %0, %1, i64 %2) @@ -997,6 +1041,7 @@ entry: declare @llvm.riscv.vxor.nxv1i8.i8( , + , i8, i64); @@ -1008,6 +1053,7 @@ define @intrinsic_vxor_vx_nxv1i8_nxv1i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vxor.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1042,6 +1088,7 @@ entry: declare @llvm.riscv.vxor.nxv2i8.i8( , + , i8, i64); @@ -1053,6 +1100,7 @@ define @intrinsic_vxor_vx_nxv2i8_nxv2i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vxor.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1087,6 +1135,7 @@ entry: declare 
@llvm.riscv.vxor.nxv4i8.i8( , + , i8, i64); @@ -1098,6 +1147,7 @@ define @intrinsic_vxor_vx_nxv4i8_nxv4i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vxor.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1132,6 +1182,7 @@ entry: declare @llvm.riscv.vxor.nxv8i8.i8( , + , i8, i64); @@ -1143,6 +1194,7 @@ define @intrinsic_vxor_vx_nxv8i8_nxv8i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vxor.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1177,6 +1229,7 @@ entry: declare @llvm.riscv.vxor.nxv16i8.i8( , + , i8, i64); @@ -1188,6 +1241,7 @@ define @intrinsic_vxor_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vxor.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1222,6 +1276,7 @@ entry: declare @llvm.riscv.vxor.nxv32i8.i8( , + , i8, i64); @@ -1233,6 +1288,7 @@ define @intrinsic_vxor_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vxor.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1267,6 +1323,7 @@ entry: declare @llvm.riscv.vxor.nxv64i8.i8( , + , i8, i64); @@ -1278,6 +1335,7 @@ define @intrinsic_vxor_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vxor.nxv64i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1312,6 +1370,7 @@ entry: declare @llvm.riscv.vxor.nxv1i16.i16( , + , i16, i64); @@ -1323,6 +1382,7 @@ define @intrinsic_vxor_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vxor.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1357,6 +1417,7 @@ entry: declare @llvm.riscv.vxor.nxv2i16.i16( , + , i16, i64); @@ -1368,6 +1429,7 @@ define @intrinsic_vxor_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vxor.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1402,6 +1464,7 @@ entry: declare @llvm.riscv.vxor.nxv4i16.i16( , + , i16, i64); @@ -1413,6 +1476,7 @@ define @intrinsic_vxor_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vxor.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1447,6 +1511,7 @@ entry: declare @llvm.riscv.vxor.nxv8i16.i16( , + , i16, i64); @@ -1458,6 +1523,7 @@ define @intrinsic_vxor_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vxor.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1492,6 +1558,7 @@ entry: declare @llvm.riscv.vxor.nxv16i16.i16( , + , i16, i64); @@ -1503,6 +1570,7 @@ define @intrinsic_vxor_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vxor.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1537,6 +1605,7 @@ entry: declare @llvm.riscv.vxor.nxv32i16.i16( , + , i16, i64); @@ -1548,6 +1617,7 @@ define @intrinsic_vxor_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vxor.nxv32i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1582,6 +1652,7 @@ entry: declare @llvm.riscv.vxor.nxv1i32.i32( , + , i32, i64); @@ -1593,6 +1664,7 @@ define @intrinsic_vxor_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vxor.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1627,6 +1699,7 @@ entry: declare @llvm.riscv.vxor.nxv2i32.i32( , + , i32, i64); @@ -1638,6 +1711,7 @@ define @intrinsic_vxor_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vxor.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1672,6 +1746,7 @@ entry: declare @llvm.riscv.vxor.nxv4i32.i32( , + , i32, i64); @@ -1683,6 +1758,7 @@ define @intrinsic_vxor_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vxor.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1717,6 +1793,7 @@ entry: declare @llvm.riscv.vxor.nxv8i32.i32( , + , i32, i64); @@ -1728,6 +1805,7 @@ define @intrinsic_vxor_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vxor.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1762,6 +1840,7 @@ entry: declare @llvm.riscv.vxor.nxv16i32.i32( , + , i32, i64); @@ -1773,6 +1852,7 @@ define @intrinsic_vxor_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vxor.nxv16i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1807,6 +1887,7 @@ entry: declare @llvm.riscv.vxor.nxv1i64.i64( , + , i64, i64); @@ -1818,6 +1899,7 @@ define 
@intrinsic_vxor_vx_nxv1i64_nxv1i64_i64( @llvm.riscv.vxor.nxv1i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1852,6 +1934,7 @@ entry: declare @llvm.riscv.vxor.nxv2i64.i64( , + , i64, i64); @@ -1863,6 +1946,7 @@ define @intrinsic_vxor_vx_nxv2i64_nxv2i64_i64( @llvm.riscv.vxor.nxv2i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1897,6 +1981,7 @@ entry: declare @llvm.riscv.vxor.nxv4i64.i64( , + , i64, i64); @@ -1908,6 +1993,7 @@ define @intrinsic_vxor_vx_nxv4i64_nxv4i64_i64( @llvm.riscv.vxor.nxv4i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1942,6 +2028,7 @@ entry: declare @llvm.riscv.vxor.nxv8i64.i64( , + , i64, i64); @@ -1953,6 +2040,7 @@ define @intrinsic_vxor_vx_nxv8i64_nxv8i64_i64( @llvm.riscv.vxor.nxv8i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1993,6 +2081,7 @@ define @intrinsic_vxor_vi_nxv1i8_nxv1i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vxor.nxv1i8.i8( + undef, %0, i8 9, i64 %1) @@ -2025,6 +2114,7 @@ define @intrinsic_vxor_vi_nxv2i8_nxv2i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vxor.nxv2i8.i8( + undef, %0, i8 9, i64 %1) @@ -2057,6 +2147,7 @@ define @intrinsic_vxor_vi_nxv4i8_nxv4i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vxor.nxv4i8.i8( + undef, %0, i8 9, i64 %1) @@ -2089,6 +2180,7 @@ define @intrinsic_vxor_vi_nxv8i8_nxv8i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vxor.nxv8i8.i8( + undef, %0, i8 9, i64 %1) @@ -2121,6 +2213,7 @@ define @intrinsic_vxor_vi_nxv16i8_nxv16i8_i8( @llvm.riscv.vxor.nxv16i8.i8( + undef, %0, i8 9, i64 %1) @@ -2153,6 +2246,7 @@ define @intrinsic_vxor_vi_nxv32i8_nxv32i8_i8( @llvm.riscv.vxor.nxv32i8.i8( + undef, %0, i8 9, i64 %1) @@ -2185,6 +2279,7 @@ define @intrinsic_vxor_vi_nxv64i8_nxv64i8_i8( @llvm.riscv.vxor.nxv64i8.i8( + undef, %0, i8 9, i64 %1) @@ -2217,6 +2312,7 @@ define @intrinsic_vxor_vi_nxv1i16_nxv1i16_i16( @llvm.riscv.vxor.nxv1i16.i16( + undef, %0, i16 9, i64 %1) @@ -2249,6 +2345,7 @@ define @intrinsic_vxor_vi_nxv2i16_nxv2i16_i16( @llvm.riscv.vxor.nxv2i16.i16( + undef, %0, i16 9, i64 %1) @@ -2281,6 +2378,7 @@ define @intrinsic_vxor_vi_nxv4i16_nxv4i16_i16( @llvm.riscv.vxor.nxv4i16.i16( + undef, %0, i16 9, i64 %1) @@ -2313,6 +2411,7 @@ define @intrinsic_vxor_vi_nxv8i16_nxv8i16_i16( @llvm.riscv.vxor.nxv8i16.i16( + undef, %0, i16 9, i64 %1) @@ -2345,6 +2444,7 @@ define @intrinsic_vxor_vi_nxv16i16_nxv16i16_i16( @llvm.riscv.vxor.nxv16i16.i16( + undef, %0, i16 9, i64 %1) @@ -2377,6 +2477,7 @@ define @intrinsic_vxor_vi_nxv32i16_nxv32i16_i16( @llvm.riscv.vxor.nxv32i16.i16( + undef, %0, i16 9, i64 %1) @@ -2409,6 +2510,7 @@ define @intrinsic_vxor_vi_nxv1i32_nxv1i32_i32( @llvm.riscv.vxor.nxv1i32.i32( + undef, %0, i32 9, i64 %1) @@ -2441,6 +2543,7 @@ define @intrinsic_vxor_vi_nxv2i32_nxv2i32_i32( @llvm.riscv.vxor.nxv2i32.i32( + undef, %0, i32 9, i64 %1) @@ -2473,6 +2576,7 @@ define @intrinsic_vxor_vi_nxv4i32_nxv4i32_i32( @llvm.riscv.vxor.nxv4i32.i32( + undef, %0, i32 9, i64 %1) @@ -2505,6 +2609,7 @@ define @intrinsic_vxor_vi_nxv8i32_nxv8i32_i32( @llvm.riscv.vxor.nxv8i32.i32( + undef, %0, i32 9, i64 %1) @@ -2537,6 +2642,7 @@ define @intrinsic_vxor_vi_nxv16i32_nxv16i32_i32( @llvm.riscv.vxor.nxv16i32.i32( + undef, %0, i32 9, i64 %1) @@ -2569,6 +2675,7 @@ define @intrinsic_vxor_vi_nxv1i64_nxv1i64_i64( @llvm.riscv.vxor.nxv1i64.i64( + undef, %0, i64 9, i64 %1) @@ -2601,6 +2708,7 @@ define @intrinsic_vxor_vi_nxv2i64_nxv2i64_i64( @llvm.riscv.vxor.nxv2i64.i64( + undef, %0, i64 9, i64 %1) @@ -2633,6 +2741,7 @@ define @intrinsic_vxor_vi_nxv4i64_nxv4i64_i64( @llvm.riscv.vxor.nxv4i64.i64( + undef, %0, i64 9, i64 %1) @@ -2665,6 +2774,7 @@ define 
@intrinsic_vxor_vi_nxv8i64_nxv8i64_i64( @llvm.riscv.vxor.nxv8i64.i64( + undef, %0, i64 9, i64 %1)
--
2.7.4
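For the vxor scalar and immediate forms updated above, the shape is the same: the intrinsic
gains a leading passthru of the result type, and the unmasked tests pass undef for it. A
minimal sketch of one of those tests, with the scalable types spelled out from the mangled
name and the CHECK lines omitted:

declare <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64,
  i64);

define <vscale x 1 x i64> @intrinsic_vxor_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
entry:
  ; Passthru (undef here), then the vector source, the xor immediate, and the vl.
  %a = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i64 9,
    i64 %1)
  ret <vscale x 1 x i64> %a
}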