From 28c3a74a5c6c7ed1ac97a010ca080eaacfd7f324 Mon Sep 17 00:00:00 2001
From: eopXD
Date: Sun, 16 Jul 2023 20:18:11 -0700
Subject: [PATCH] [Clang][RISCV] Improve diagnostic message for full multiply
 intrinsics

The full multiply intrinsics are not included for EEW=64 in Zve64*.
They require the V extension to be enabled.

This commit improves the diagnostic message from

```
:4:10: error: call to undeclared function '__riscv_vsmul_vv_i64m1';
    4 |   return __riscv_vsmul_vv_i64m1(op1, op2, __RISCV_VXRM_RNU, vl);
```

to

```
test.c:5:10: error: builtin requires: v
    5 |   return __riscv_vsmul_vv_i64m1(op1, op2, __RISCV_VXRM_RNU, vl);
```

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D155416
---
 clang/include/clang/Basic/riscv_vector.td          |   4 -
 clang/include/clang/Support/RISCVVIntrinsicUtils.h |   3 +-
 clang/lib/Sema/SemaChecking.cpp                    |  67 +++++++
 clang/lib/Sema/SemaRISCVVectorLookup.cpp           |   7 -
 clang/test/Sema/riscv-vector-v-check.c             | 197 +++++++++++++++++++++
 clang/utils/TableGen/RISCVVEmitter.cpp             |   1 -
 6 files changed, 265 insertions(+), 14 deletions(-)
 create mode 100644 clang/test/Sema/riscv-vector-v-check.c

diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index 49011d6..7e58898 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -1713,13 +1713,11 @@ defm vmax : RVVSignedBinBuiltinSet;
 
 // 12.10. Vector Single-Width Integer Multiply Instructions
 defm vmul : RVVIntBinBuiltinSet;
-let RequiredFeatures = ["FullMultiply"] in {
 defm vmulh : RVVSignedBinBuiltinSet;
 defm vmulhu : RVVUnsignedBinBuiltinSet;
 defm vmulhsu : RVVOutOp1BuiltinSet<"vmulhsu", "csil",
                                    [["vv", "v", "vvUv"],
                                     ["vx", "v", "vvUe"]]>;
-}
 
 // 12.11. Vector Integer Divide Instructions
 defm vdivu : RVVUnsignedBinBuiltinSet;
@@ -1859,9 +1857,7 @@ let ManualCodegen = [{
 defm vasub : RVVSignedBinBuiltinSetRoundingMode;
 
 // 13.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
-let RequiredFeatures = ["FullMultiply"] in {
 defm vsmul : RVVSignedBinBuiltinSetRoundingMode;
-}
 
 // 13.4. Vector Single-Width Scaling Shift Instructions
 defm vssrl : RVVUnsignedShiftBuiltinSetRoundingMode;
diff --git a/clang/include/clang/Support/RISCVVIntrinsicUtils.h b/clang/include/clang/Support/RISCVVIntrinsicUtils.h
index e985a9b..804b151 100644
--- a/clang/include/clang/Support/RISCVVIntrinsicUtils.h
+++ b/clang/include/clang/Support/RISCVVIntrinsicUtils.h
@@ -475,8 +475,7 @@ public:
 enum RVVRequire : uint8_t {
   RVV_REQ_None = 0,
   RVV_REQ_RV64 = 1 << 0,
-  RVV_REQ_FullMultiply = 1 << 1,
-  RVV_REQ_Xsfvcp = 1 << 2,
+  RVV_REQ_Xsfvcp = 1 << 1,
 
   LLVM_MARK_AS_BITMASK_ENUM(RVV_REQ_Xsfvcp)
 };
diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
index a8bcf49..de0977e 100644
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -4527,6 +4527,73 @@ bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo &TI,
   if (FeatureMissing)
     return true;
 
+  // vmulh.vv, vmulh.vx, vmulhu.vv, vmulhu.vx, vmulhsu.vv, vmulhsu.vx,
+  // vsmul.vv, vsmul.vx are not included for EEW=64 in Zve64*.
+  switch (BuiltinID) {
+  default:
+    break;
+  case RISCVVector::BI__builtin_rvv_vmulhsu_vv:
+  case RISCVVector::BI__builtin_rvv_vmulhsu_vx:
+  case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tu:
+  case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tu:
+  case RISCVVector::BI__builtin_rvv_vmulhsu_vv_m:
+  case RISCVVector::BI__builtin_rvv_vmulhsu_vx_m:
+  case RISCVVector::BI__builtin_rvv_vmulhsu_vv_mu:
+  case RISCVVector::BI__builtin_rvv_vmulhsu_vx_mu:
+  case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tum:
+  case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tum:
+  case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tumu:
+  case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tumu:
+  case RISCVVector::BI__builtin_rvv_vmulhu_vv:
+  case RISCVVector::BI__builtin_rvv_vmulhu_vx:
+  case RISCVVector::BI__builtin_rvv_vmulhu_vv_tu:
+  case RISCVVector::BI__builtin_rvv_vmulhu_vx_tu:
+  case RISCVVector::BI__builtin_rvv_vmulhu_vv_m:
+  case RISCVVector::BI__builtin_rvv_vmulhu_vx_m:
+  case RISCVVector::BI__builtin_rvv_vmulhu_vv_mu:
+  case RISCVVector::BI__builtin_rvv_vmulhu_vx_mu:
+  case RISCVVector::BI__builtin_rvv_vmulhu_vv_tum:
+  case RISCVVector::BI__builtin_rvv_vmulhu_vx_tum:
+  case RISCVVector::BI__builtin_rvv_vmulhu_vv_tumu:
+  case RISCVVector::BI__builtin_rvv_vmulhu_vx_tumu:
+  case RISCVVector::BI__builtin_rvv_vmulh_vv:
+  case RISCVVector::BI__builtin_rvv_vmulh_vx:
+  case RISCVVector::BI__builtin_rvv_vmulh_vv_tu:
+  case RISCVVector::BI__builtin_rvv_vmulh_vx_tu:
+  case RISCVVector::BI__builtin_rvv_vmulh_vv_m:
+  case RISCVVector::BI__builtin_rvv_vmulh_vx_m:
+  case RISCVVector::BI__builtin_rvv_vmulh_vv_mu:
+  case RISCVVector::BI__builtin_rvv_vmulh_vx_mu:
+  case RISCVVector::BI__builtin_rvv_vmulh_vv_tum:
+  case RISCVVector::BI__builtin_rvv_vmulh_vx_tum:
+  case RISCVVector::BI__builtin_rvv_vmulh_vv_tumu:
+  case RISCVVector::BI__builtin_rvv_vmulh_vx_tumu:
+  case RISCVVector::BI__builtin_rvv_vsmul_vv:
+  case RISCVVector::BI__builtin_rvv_vsmul_vx:
+  case RISCVVector::BI__builtin_rvv_vsmul_vv_tu:
+  case RISCVVector::BI__builtin_rvv_vsmul_vx_tu:
+  case RISCVVector::BI__builtin_rvv_vsmul_vv_m:
+  case RISCVVector::BI__builtin_rvv_vsmul_vx_m:
+  case RISCVVector::BI__builtin_rvv_vsmul_vv_mu:
+  case RISCVVector::BI__builtin_rvv_vsmul_vx_mu:
+  case RISCVVector::BI__builtin_rvv_vsmul_vv_tum:
+  case RISCVVector::BI__builtin_rvv_vsmul_vx_tum:
+  case RISCVVector::BI__builtin_rvv_vsmul_vv_tumu:
+  case RISCVVector::BI__builtin_rvv_vsmul_vx_tumu: {
+    bool RequireV = false;
+    for (unsigned ArgNum = 0; ArgNum < TheCall->getNumArgs(); ++ArgNum)
+      RequireV |= TheCall->getArg(ArgNum)->getType()->isRVVType(
+          /* Bitwidth */ 64, /* IsFloat */ false);
+
+    if (RequireV && !TI.hasFeature("v"))
+      return Diag(TheCall->getBeginLoc(),
+                  diag::err_riscv_builtin_requires_extension)
+             << /* IsExtension */ false << TheCall->getSourceRange() << "v";
+
+    break;
+  }
+  }
+
   switch (BuiltinID) {
   case RISCVVector::BI__builtin_rvv_vsetvli:
     return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3) ||
diff --git a/clang/lib/Sema/SemaRISCVVectorLookup.cpp b/clang/lib/Sema/SemaRISCVVectorLookup.cpp
index fbed6e8..db2059e 100644
--- a/clang/lib/Sema/SemaRISCVVectorLookup.cpp
+++ b/clang/lib/Sema/SemaRISCVVectorLookup.cpp
@@ -202,7 +202,6 @@ void RISCVIntrinsicManagerImpl::ConstructRVVIntrinsics(
     ArrayRef<RVVIntrinsicRecord> Recs, IntrinsicKind K) {
   const TargetInfo &TI = Context.getTargetInfo();
   bool HasRV64 = TI.hasFeature("64bit");
-  bool HasFullMultiply = TI.hasFeature("v");
   // Construction of RVVIntrinsicRecords need to sync with createRVVIntrinsics
   // in RISCVVEmitter.cpp.
   for (auto &Record : Recs) {
@@ -256,12 +255,6 @@ void RISCVIntrinsicManagerImpl::ConstructRVVIntrinsics(
         !HasRV64)
       continue;
 
-    if ((BaseType == BasicType::Int64) &&
-        ((Record.RequiredExtensions & RVV_REQ_FullMultiply) ==
-         RVV_REQ_FullMultiply) &&
-        !HasFullMultiply)
-      continue;
-
     // Expanded with different LMUL.
     for (int Log2LMUL = -3; Log2LMUL <= 3; Log2LMUL++) {
       if (!(Record.Log2LMULMask & (1 << (Log2LMUL + 3))))
diff --git a/clang/test/Sema/riscv-vector-v-check.c b/clang/test/Sema/riscv-vector-v-check.c
new file mode 100644
index 0000000..8faa92c
--- /dev/null
+++ b/clang/test/Sema/riscv-vector-v-check.c
@@ -0,0 +1,197 @@
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN:   -disable-O0-optnone -o - -fsyntax-only %s -verify
+// REQUIRES: riscv-registered-target
+#include <riscv_vector.h>
+
+vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return __riscv_vsmul_vv_i64m1(op1, op2, __RISCV_VXRM_RNU, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+  return __riscv_vsmul_vx_i64m1(op1, op2, __RISCV_VXRM_RNU, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return __riscv_vsmul_vv_i64m2(op1, op2, __RISCV_VXRM_RNU, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+  return __riscv_vsmul_vx_i64m2(op1, op2, __RISCV_VXRM_RNU, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return __riscv_vsmul_vv_i64m4(op1, op2, __RISCV_VXRM_RNU, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+  return __riscv_vsmul_vx_i64m4(op1, op2, __RISCV_VXRM_RNU, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return __riscv_vsmul_vv_i64m8(op1, op2, __RISCV_VXRM_RNU, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m8_t test_vsmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+  return __riscv_vsmul_vx_i64m8(op1, op2, __RISCV_VXRM_RNU, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m1_t test_vsmul_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return __riscv_vsmul_vv_i64m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m1_t test_vsmul_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
+  return __riscv_vsmul_vx_i64m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m2_t test_vsmul_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return __riscv_vsmul_vv_i64m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m2_t test_vsmul_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
+  return __riscv_vsmul_vx_i64m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m4_t test_vsmul_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return __riscv_vsmul_vv_i64m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m4_t test_vsmul_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
+  return __riscv_vsmul_vx_i64m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m8_t test_vsmul_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return __riscv_vsmul_vv_i64m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m8_t test_vsmul_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
+  return __riscv_vsmul_vx_i64m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m1_t test_vmulh_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return __riscv_vmulh_vv_i64m1(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m1_t test_vmulh_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+  return __riscv_vmulh_vx_i64m1(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m2_t test_vmulh_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return __riscv_vmulh_vv_i64m2(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m2_t test_vmulh_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+  return __riscv_vmulh_vx_i64m2(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m4_t test_vmulh_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return __riscv_vmulh_vv_i64m4(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m4_t test_vmulh_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+  return __riscv_vmulh_vx_i64m4(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m8_t test_vmulh_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return __riscv_vmulh_vv_i64m8(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m8_t test_vmulh_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+  return __riscv_vmulh_vx_i64m8(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m1_t test_vmulh_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return __riscv_vmulh_vv_i64m1_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m1_t test_vmulh_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
+  return __riscv_vmulh_vx_i64m1_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m2_t test_vmulh_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return __riscv_vmulh_vv_i64m2_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m2_t test_vmulh_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
+  return __riscv_vmulh_vx_i64m2_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m4_t test_vmulh_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return __riscv_vmulh_vv_i64m4_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m4_t test_vmulh_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
+  return __riscv_vmulh_vx_i64m4_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m8_t test_vmulh_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return __riscv_vmulh_vv_i64m8_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m8_t test_vmulh_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
+  return __riscv_vmulh_vx_i64m8_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vuint64m1_t test_vmulhu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return __riscv_vmulhu_vv_u64m1(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vuint64m1_t test_vmulhu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return __riscv_vmulhu_vx_u64m1(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vuint64m2_t test_vmulhu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return __riscv_vmulhu_vv_u64m2(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vuint64m2_t test_vmulhu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return __riscv_vmulhu_vx_u64m2(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vuint64m4_t test_vmulhu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return __riscv_vmulhu_vv_u64m4(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vuint64m4_t test_vmulhu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return __riscv_vmulhu_vx_u64m4(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vuint64m8_t test_vmulhu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return __riscv_vmulhu_vv_u64m8(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vuint64m8_t test_vmulhu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return __riscv_vmulhu_vx_u64m8(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vuint64m1_t test_vmulhu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return __riscv_vmulhu_vv_u64m1_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vuint64m1_t test_vmulhu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return __riscv_vmulhu_vx_u64m1_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vuint64m2_t test_vmulhu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return __riscv_vmulhu_vv_u64m2_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vuint64m2_t test_vmulhu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return __riscv_vmulhu_vx_u64m2_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vuint64m4_t test_vmulhu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return __riscv_vmulhu_vv_u64m4_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vuint64m4_t test_vmulhu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return __riscv_vmulhu_vx_u64m4_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vuint64m8_t test_vmulhu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return __riscv_vmulhu_vv_u64m8_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vuint64m8_t test_vmulhu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return __riscv_vmulhu_vx_u64m8_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m1_t test_vmulhsu_vv_i64m1(vint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return __riscv_vmulhsu_vv_i64m1(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m1_t test_vmulhsu_vx_i64m1(vint64m1_t op1, uint64_t op2, size_t vl) {
+  return __riscv_vmulhsu_vx_i64m1(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m2_t test_vmulhsu_vv_i64m2(vint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return __riscv_vmulhsu_vv_i64m2(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m2_t test_vmulhsu_vx_i64m2(vint64m2_t op1, uint64_t op2, size_t vl) {
+  return __riscv_vmulhsu_vx_i64m2(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m4_t test_vmulhsu_vv_i64m4(vint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return __riscv_vmulhsu_vv_i64m4(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m4_t test_vmulhsu_vx_i64m4(vint64m4_t op1, uint64_t op2, size_t vl) {
+  return __riscv_vmulhsu_vx_i64m4(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m8_t test_vmulhsu_vv_i64m8(vint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return __riscv_vmulhsu_vv_i64m8(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m8_t test_vmulhsu_vx_i64m8(vint64m8_t op1, uint64_t op2, size_t vl) {
+  return __riscv_vmulhsu_vx_i64m8(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m1_t test_vmulhsu_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return __riscv_vmulhsu_vv_i64m1_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m1_t test_vmulhsu_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, uint64_t op2, size_t vl) {
+  return __riscv_vmulhsu_vx_i64m1_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m2_t test_vmulhsu_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return __riscv_vmulhsu_vv_i64m2_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m2_t test_vmulhsu_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, uint64_t op2, size_t vl) {
+  return __riscv_vmulhsu_vx_i64m2_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m4_t test_vmulhsu_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return __riscv_vmulhsu_vv_i64m4_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m4_t test_vmulhsu_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, uint64_t op2, size_t vl) {
+  return __riscv_vmulhsu_vx_i64m4_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m8_t test_vmulhsu_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return __riscv_vmulhsu_vv_i64m8_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m8_t test_vmulhsu_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, uint64_t op2, size_t vl) {
+  return __riscv_vmulhsu_vx_i64m8_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
diff --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp
index a9349f1..2db48f3 100644
--- a/clang/utils/TableGen/RISCVVEmitter.cpp
+++ b/clang/utils/TableGen/RISCVVEmitter.cpp
@@ -655,7 +655,6 @@ void RVVEmitter::createRVVIntrinsics(
     for (auto RequiredFeature : RequiredFeatures) {
      RVVRequire RequireExt = StringSwitch<RVVRequire>(RequiredFeature)
                                  .Case("RV64", RVV_REQ_RV64)
-                                 .Case("FullMultiply", RVV_REQ_FullMultiply)
                                  .Case("Xsfvcp", RVV_REQ_Xsfvcp)
                                  .Default(RVV_REQ_None);
      assert(RequireExt != RVV_REQ_None && "Unrecognized required feature?");
-- 
2.7.4
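
For reference, a minimal standalone reproducer sketch (the file name and -march
strings below are illustrative, not part of the patch) shows the diagnostic
this change emits under Zve64x, and that the same code compiles once the full
V extension is enabled:

```
/* repro.c - illustrative sketch, not part of the patch.
 *
 * With only Zve64x, an EEW=64 full-multiply intrinsic should be rejected
 * with the improved diagnostic, e.g.:
 *   clang --target=riscv64 -march=rv64imac_zve64x -fsyntax-only repro.c
 *     error: builtin requires: v
 * With the full V extension the same code should be accepted:
 *   clang --target=riscv64 -march=rv64gcv -fsyntax-only repro.c
 */
#include <riscv_vector.h>

vint64m1_t mulh_i64(vint64m1_t op1, vint64m1_t op2, size_t vl) {
  /* EEW=64 operands: requires V, not just Zve64*. */
  return __riscv_vmulh_vv_i64m1(op1, op2, vl);
}
```

The EEW=8/16/32 variants (e.g. __riscv_vmulh_vv_i32m1) remain available under
Zve32x/Zve64x, which is why the Sema check keys on whether any operand is a
64-bit integer RVV type rather than rejecting the builtins outright.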