From: Jianjian GUAN
Date: Mon, 15 May 2023 07:51:29 +0000 (+0800)
Subject: [RISCV] Refactor predicates for rvv intrinsic patterns.
X-Git-Tag: upstream/17.0.6~8227
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=9360926cfcd3bca1c0d662bf8d9ea6726cda4c22;p=platform%2Fupstream%2Fllvm.git

[RISCV] Refactor predicates for rvv intrinsic patterns.

This patch does two things:
1. Adds accurate Predicates to the intrinsic-pseudo pattern classes, derived
   from the vector type each pattern matches.
2. Makes the vmulh, vmulhsu, vmulhu and vsmul intrinsics selectable only when
   the full 'V' extension is enabled, since Zve64* does not include the
   EEW=64 forms of these instructions.

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D150550
---

diff --git a/llvm/lib/Target/RISCV/RISCVFeatures.td b/llvm/lib/Target/RISCV/RISCVFeatures.td
index 3708b00..0f9112c2 100644
--- a/llvm/lib/Target/RISCV/RISCVFeatures.td
+++ b/llvm/lib/Target/RISCV/RISCVFeatures.td
@@ -447,11 +447,17 @@ def HasVInstructionsAnyF : Predicate<"Subtarget->hasVInstructionsAnyF()">,
                            "'V' (Vector Extension for Application Processors), 'Zve32f', "
                            "'Zve64f' or 'Zve64d' (Vector Extensions for Embedded Processors)">;

+def HasVInstructionsF64 : Predicate<"Subtarget->hasVInstructionsF64()">;
+
+def HasVInstructionsFullMultiply : Predicate<"Subtarget->hasVInstructionsFullMultiply()">;
+
 def FeatureStdExtZvfh
     : SubtargetFeature<"experimental-zvfh", "HasStdExtZvfh", "true",
                        "'Zvfh' (Vector Half-Precision Floating-Point)",
                        [FeatureStdExtZve32f, FeatureStdExtZfhmin]>;

+def HasVInstructionsF16 : Predicate<"Subtarget->hasVInstructionsF16()">;
+
 def HasStdExtZfhOrZvfh
     : Predicate<"Subtarget->hasStdExtZfh() || Subtarget->hasStdExtZvfh()">,
       AssemblerPredicate<(any_of FeatureStdExtZfh, FeatureStdExtZvfh),
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index eb7dc8b..c797f98 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -675,6 +675,14 @@ class VPseudo :
   let VLMul = m.value;
 }

+class GetVTypePredicates<VTypeInfo vti> {
+  list<Predicate> Predicates = !cond(!eq(vti.Scalar, f16) : [HasVInstructionsF16],
+                                     !eq(vti.Scalar, f32) : [HasVInstructionsAnyF],
+                                     !eq(vti.Scalar, f64) : [HasVInstructionsF64],
+                                     !eq(vti.SEW, 64) : [HasVInstructionsI64],
+                                     true : [HasVInstructions]);
+}
+
 class VPseudoUSLoadNoMask :
       Pseudo<(outs RetClass:$rd),
              (ins GPRMem:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
@@ -4416,6 +4424,7 @@ multiclass VPatUnaryS_M vtilist> {
   foreach vti = vtilist in {
+    let Predicates = GetVTypePredicates<vti>.Predicates in
     def : VPatUnaryAnyMask_E {
   foreach vti = AllIntegerVectors in {
-    def : VPatUnaryNoMask;
-    def : VPatUnaryNoMaskTU;
-    def : VPatUnaryMaskTA;
+    let Predicates = GetVTypePredicates<vti>.Predicates in {
+      def : VPatUnaryNoMask;
+      def : VPatUnaryNoMaskTU;
+      def : VPatUnaryMaskTA;
+    }
   }
 }

@@ -4451,69 +4462,78 @@ multiclass VPatUnaryV_VF;
-    def : VPatUnaryNoMaskTU.Predicates,
+                                 GetVTypePredicates.Predicates) in {
+      def : VPatUnaryNoMask;
+      def : VPatUnaryNoMaskTU;
+      def : VPatUnaryMaskTA;
-    def : VPatUnaryMaskTA;
-  }
+    }
+  }
 }

 multiclass VPatUnaryV_V vtilist> {
   foreach vti = vtilist in {
-    def : VPatUnaryNoMask;
-    def : VPatUnaryNoMaskTU.Predicates in {
+      def : VPatUnaryNoMask;
+      def : VPatUnaryNoMaskTU;
+      def : VPatUnaryMaskTA;
-    def : VPatUnaryMaskTA;
+    }
   }
 }

 multiclass VPatUnaryV_V_E vtilist> {
   foreach vti = vtilist in {
-    def : VPatUnaryNoMask_E;
-    def : VPatUnaryNoMaskTU_E.Predicates in {
+      def : VPatUnaryNoMask_E;
+      def : VPatUnaryNoMaskTU_E;
+      def : VPatUnaryMaskTA_E;
-    def : VPatUnaryMaskTA_E;
+    }
   }
 }

 multiclass VPatNullaryV {
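// Editorial sketch (not part of the upstream patch): how a pattern class is
// expected to consume GetVTypePredicates above. The record name ExamplePat is
// hypothetical; everything else follows the definitions in this file.
//
//   foreach vti = AllIntegerVectors in
//     let Predicates = GetVTypePredicates<vti>.Predicates in
//     def : ExamplePat<vti>;
//
// For an i64-element vti the !cond resolves to [HasVInstructionsI64], for an
// f16 vti to [HasVInstructionsF16], and otherwise it falls through to
// [HasVInstructions], so each pattern is gated on exactly the features its
// element type needs instead of one blanket predicate per section.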
   foreach vti = AllIntegerVectors in {
-    def : Pat<(vti.Vector (!cast(intrinsic)
-                          (vti.Vector undef),
-                          VLOpFrag)),
-              (!cast(instruction#"_V_" # vti.LMul.MX)
-              GPR:$vl, vti.Log2SEW)>;
-    def : Pat<(vti.Vector (!cast(intrinsic)
-                          (vti.Vector vti.RegClass:$merge),
-                          VLOpFrag)),
-              (!cast(instruction#"_V_" # vti.LMul.MX # "_TU")
-              vti.RegClass:$merge, GPR:$vl, vti.Log2SEW)>;
-    def : Pat<(vti.Vector (!cast(intrinsic # "_mask")
-                          (vti.Vector vti.RegClass:$merge),
-                          (vti.Mask V0), VLOpFrag, (XLenVT timm:$policy))),
-              (!cast(instruction#"_V_" # vti.LMul.MX # "_MASK")
-              vti.RegClass:$merge, (vti.Mask V0),
-              GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>;
+    let Predicates = GetVTypePredicates<vti>.Predicates in {
+      def : Pat<(vti.Vector (!cast(intrinsic)
+                            (vti.Vector undef),
+                            VLOpFrag)),
+                (!cast(instruction#"_V_" # vti.LMul.MX)
+                GPR:$vl, vti.Log2SEW)>;
+      def : Pat<(vti.Vector (!cast(intrinsic)
+                            (vti.Vector vti.RegClass:$merge),
+                            VLOpFrag)),
+                (!cast(instruction#"_V_" # vti.LMul.MX # "_TU")
+                vti.RegClass:$merge, GPR:$vl, vti.Log2SEW)>;
+      def : Pat<(vti.Vector (!cast(intrinsic # "_mask")
+                            (vti.Vector vti.RegClass:$merge),
+                            (vti.Mask V0), VLOpFrag, (XLenVT timm:$policy))),
+                (!cast(instruction#"_V_" # vti.LMul.MX # "_MASK")
+                vti.RegClass:$merge, (vti.Mask V0),
+                GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>;
+    }
   }
 }

@@ -4683,6 +4703,7 @@ multiclass VPatConversionTA vtilist> {
   foreach vti = vtilist in
+    let Predicates = GetVTypePredicates<vti>.Predicates in
     defm : VPatBinaryTA vtilist> {
   foreach vti = vtilist in
+    let Predicates = GetVTypePredicates<vti>.Predicates in
     defm : VPatBinaryTA vtilist> {
   foreach vti = vtilist in {
     defvar ivti = GetIntVTypeInfo.Vti;
+    let Predicates = GetVTypePredicates<vti>.Predicates in
     defm : VPatBinaryTA.ret;
     defvar ivti = !cast("VI" # eew # emul_str);
     defvar inst = instruction # "_VV_" # vti.LMul.MX # "_E" # vti.SEW # "_" # emul_str;
+    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
+                                 GetVTypePredicates<ivti>.Predicates) in
     defm : VPatBinaryTA vtilist> {
   foreach vti = vtilist in {
     defvar kind = "V"#vti.ScalarSuffix;
+    let Predicates = GetVTypePredicates<vti>.Predicates in
     defm : VPatBinaryTA vtilist> {
   foreach vti = vtilist in {
     defvar kind = "V"#vti.ScalarSuffix;
+    let Predicates = GetVTypePredicates<vti>.Predicates in
     defm : VPatBinaryTA vtilist> {
   foreach vti = vtilist in
+    let Predicates = GetVTypePredicates<vti>.Predicates in
     defm : VPatBinaryTA vtilist, Operand imm_type> {
   foreach vti = vtilist in
+    let Predicates = GetVTypePredicates<vti>.Predicates in
     defm : VPatBinaryTA {
   foreach mti = AllMasks in
+    let Predicates = [HasVInstructions] in
     def : VPatBinaryM;
@@ -4783,6 +4813,8 @@ multiclass VPatBinaryW_VV.Predicates,
+                               GetVTypePredicates.Predicates) in
     defm : VPatBinaryTA.Predicates,
+                               GetVTypePredicates.Predicates) in
     defm : VPatBinaryTA;
-    def : VPatBinaryNoMaskTU;
-    let AddedComplexity = 1 in {
-      def : VPatTiedBinaryNoMaskTU.Predicates,
+                               GetVTypePredicates.Predicates) in {
+      def : VPatTiedBinaryNoMask;
-      def : VPatTiedBinaryMask;
+      def : VPatBinaryNoMaskTU;
+      let AddedComplexity = 1 in {
+        def : VPatTiedBinaryNoMaskTU;
+        def : VPatTiedBinaryMask;
+      }
+      def : VPatBinaryMaskTA;
     }
-    def : VPatBinaryMaskTA;
   }
 }

@@ -4835,6 +4872,8 @@ multiclass VPatBinaryW_WX.Predicates,
+                               GetVTypePredicates.Predicates) in
     defm : VPatBinaryTA.Predicates,
+                               GetVTypePredicates.Predicates) in
     defm : VPatBinaryTA.Predicates,
+                               GetVTypePredicates.Predicates) in
     defm : VPatBinaryTA.Predicates,
+                               GetVTypePredicates.Predicates) in
     defm : VPatBinaryTA vtilist = AllIntegerVectors> {
   foreach vti = vtilist in
+    let Predicates = GetVTypePredicates<vti>.Predicates in
     defm : VPatBinaryCarryIn vtilist = AllIntegerVectors> {
   foreach vti = vtilist in
+    let Predicates = GetVTypePredicates<vti>.Predicates in
     defm : VPatBinaryCarryIn {
   foreach vti = AllIntegerVectors in
+    let Predicates = GetVTypePredicates<vti>.Predicates in
     defm : VPatBinaryCarryIn vtilist = AllIntegerVectors> {
   foreach vti = vtilist in
+    let Predicates = GetVTypePredicates<vti>.Predicates in
     defm : VPatBinaryCarryInTAIL vtilist = AllIntegerVectors> {
   foreach vti = vtilist in
+    let Predicates = GetVTypePredicates<vti>.Predicates in
     defm : VPatBinaryCarryInTAIL {
   foreach vti = AllIntegerVectors in
+    let Predicates = GetVTypePredicates<vti>.Predicates in
     defm : VPatBinaryCarryInTAIL {
   foreach vti = AllIntegerVectors in
+    let Predicates = GetVTypePredicates<vti>.Predicates in
     defm : VPatBinaryMaskOut {

 multiclass VPatBinaryV_X {
   foreach vti = AllIntegerVectors in
+    let Predicates = GetVTypePredicates<vti>.Predicates in
     defm : VPatBinaryMaskOut {

 multiclass VPatBinaryV_I {
   foreach vti = AllIntegerVectors in
+    let Predicates = GetVTypePredicates<vti>.Predicates in
     defm : VPatBinaryMaskOut {

 multiclass VPatBinaryM_VV vtilist> {
   foreach vti = vtilist in
+    let Predicates = GetVTypePredicates<vti>.Predicates in
     defm : VPatBinaryM vtilist> {
   foreach vti = vtilist in
+    let Predicates = GetVTypePredicates<vti>.Predicates in
     defm : VPatBinarySwapped vtilist> {
   foreach vti = vtilist in {
     defvar kind = "V"#vti.ScalarSuffix;
+    let Predicates = GetVTypePredicates<vti>.Predicates in
     defm : VPatBinaryM vtilist> {
   foreach vti = vtilist in
+    let Predicates = GetVTypePredicates<vti>.Predicates in
     defm : VPatBinaryM vtilist> {
   foreach vti = vtilist in
+    let Predicates = GetVTypePredicates<vti>.Predicates in
     defm : VPatTernaryWithPolicy vtilist> {
   foreach vti = vtilist in
+    let Predicates = GetVTypePredicates<vti>.Predicates in
     defm : VPatTernaryWithPolicy vtilist> {
   foreach vti = vtilist in
+    let Predicates = GetVTypePredicates<vti>.Predicates in
     defm : VPatTernaryWithPolicy vtilist, Operand Imm_type> {
   foreach vti = vtilist in
+    let Predicates = GetVTypePredicates<vti>.Predicates in
     defm : VPatTernaryWithPolicy.Predicates,
+                               GetVTypePredicates.Predicates) in
     defm : VPatTernaryWithPolicy.Predicates,
+                               GetVTypePredicates.Predicates) in
     defm : VPatTernaryWithPolicy(!if(IsFloat, "VF", "VI") # vti.SEW # "M1");
+    let Predicates = GetVTypePredicates<vti>.Predicates in
     defm : VPatTernaryTA_E.Predicates in
     defm : VPatTernaryTA_E(!if(IsFloat, "VF", "VI") # wtiSEW # "M1");
+    let Predicates = GetVTypePredicates<vti>.Predicates in
     defm : VPatTernaryTA_E.Vti;
-
+    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
+                                 GetVTypePredicates<ivti>.Predicates) in
     defm : VPatConversionTA;
@@ -5307,7 +5377,8 @@ multiclass VPatConversionVF_VI.Vti;
-
+    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
+                                 GetVTypePredicates<ivti>.Predicates) in
     defm : VPatConversionTA;
@@ -5319,7 +5390,8 @@ multiclass VPatConversionWI_VF {
   {
     defvar fvti = fvtiToFWti.Vti;
     defvar iwti = GetIntVTypeInfo.Vti;
-
+    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
+                                 GetVTypePredicates<iwti>.Predicates) in
     defm : VPatConversionTA;
@@ -5331,7 +5403,8 @@ multiclass VPatConversionWF_VI {
   {
     defvar vti = vtiToWti.Vti;
     defvar fwti = vtiToWti.Wti;
-
+    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
+                                 GetVTypePredicates<fwti>.Predicates) in
     defm : VPatConversionTA;
@@ -5343,7 +5416,8 @@ multiclass VPatConversionWF_VF {
   {
     defvar fvti = fvtiToFWti.Vti;
     defvar fwti = fvtiToFWti.Wti;
-
+    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
+                                 GetVTypePredicates<fwti>.Predicates) in
     defm : VPatConversionTA;
@@ -5355,7 +5429,8 @@ multiclass VPatConversionVI_WF {
   {
     defvar vti = vtiToWti.Vti;
     defvar fwti = vtiToWti.Wti;
-
+    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
+                                 GetVTypePredicates<fwti>.Predicates) in
     defm : VPatConversionTA;
@@ -5367,7 +5442,8 @@ multiclass VPatConversionVF_WI {
   {
     defvar fvti = fvtiToFWti.Vti;
     defvar iwti = GetIntVTypeInfo.Vti;
-
+    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
+                                 GetVTypePredicates<iwti>.Predicates) in
     defm : VPatConversionTA;
@@ -5379,7 +5455,8 @@ multiclass VPatConversionVF_WF {
   {
     defvar fvti = fvtiToFWti.Vti;
     defvar fwti = fvtiToFWti.Wti;
-
+    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
+                                 GetVTypePredicates<fwti>.Predicates) in
     defm : VPatConversionTA;
@@ -5391,6 +5468,7 @@ multiclass VPatCompare_VI(intrinsic);
     defvar Pseudo = !cast(inst#"_VI_"#vti.LMul.MX);
+    let Predicates = GetVTypePredicates<vti>.Predicates in
     def : Pat<(vti.Mask (Intr (vti.Vector vti.RegClass:$rs1),
                               (vti.Scalar ImmType:$rs2),
                               VLOpFrag)),
@@ -5398,6 +5476,7 @@ multiclass VPatCompare_VI;
     defvar IntrMask = !cast(intrinsic # "_mask");
     defvar PseudoMask = !cast(inst#"_VI_"#vti.LMul.MX#"_MASK");
+    let Predicates = GetVTypePredicates<vti>.Predicates in
     def : Pat<(vti.Mask (IntrMask (vti.Mask VR:$merge),
                                   (vti.Vector vti.RegClass:$rs1),
                                   (vti.Scalar ImmType:$rs2),
@@ -5547,62 +5626,64 @@ foreach vti = AllIntegerVectors in {
   // Occurs when legalizing vrsub.vx intrinsics for i64 on RV32 since we need
   // to use a more complex splat sequence. Add the pattern for all VTs for
   // consistency.
-  def : Pat<(vti.Vector (int_riscv_vrsub (vti.Vector (undef)),
-                                         (vti.Vector vti.RegClass:$rs2),
-                                         (vti.Vector vti.RegClass:$rs1),
-                                         VLOpFrag)),
-            (!cast("PseudoVSUB_VV_"#vti.LMul.MX) vti.RegClass:$rs1,
-                                                 vti.RegClass:$rs2,
-                                                 GPR:$vl,
-                                                 vti.Log2SEW)>;
-  def : Pat<(vti.Vector (int_riscv_vrsub (vti.Vector vti.RegClass:$merge),
-                                         (vti.Vector vti.RegClass:$rs2),
-                                         (vti.Vector vti.RegClass:$rs1),
-                                         VLOpFrag)),
-            (!cast("PseudoVSUB_VV_"#vti.LMul.MX#"_TU")
-                                                 vti.RegClass:$merge,
-                                                 vti.RegClass:$rs1,
-                                                 vti.RegClass:$rs2,
-                                                 GPR:$vl,
-                                                 vti.Log2SEW)>;
-  def : Pat<(vti.Vector (int_riscv_vrsub_mask (vti.Vector vti.RegClass:$merge),
-                                              (vti.Vector vti.RegClass:$rs2),
-                                              (vti.Vector vti.RegClass:$rs1),
-                                              (vti.Mask V0),
-                                              VLOpFrag,
-                                              (XLenVT timm:$policy))),
-            (!cast("PseudoVSUB_VV_"#vti.LMul.MX#"_MASK")
-                                                 vti.RegClass:$merge,
-                                                 vti.RegClass:$rs1,
-                                                 vti.RegClass:$rs2,
-                                                 (vti.Mask V0),
-                                                 GPR:$vl,
-                                                 vti.Log2SEW,
-                                                 (XLenVT timm:$policy))>;
-
-  // Match VSUB with a small immediate to vadd.vi by negating the immediate.
-  def : Pat<(vti.Vector (int_riscv_vsub (vti.Vector (undef)),
-                                        (vti.Vector vti.RegClass:$rs1),
-                                        (vti.Scalar simm5_plus1:$rs2),
-                                        VLOpFrag)),
-            (!cast("PseudoVADD_VI_"#vti.LMul.MX) vti.RegClass:$rs1,
-                                                 (NegImm simm5_plus1:$rs2),
-                                                 GPR:$vl,
-                                                 vti.Log2SEW)>;
-  def : Pat<(vti.Vector (int_riscv_vsub_mask (vti.Vector vti.RegClass:$merge),
-                                             (vti.Vector vti.RegClass:$rs1),
-                                             (vti.Scalar simm5_plus1:$rs2),
-                                             (vti.Mask V0),
-                                             VLOpFrag,
-                                             (XLenVT timm:$policy))),
-            (!cast("PseudoVADD_VI_"#vti.LMul.MX#"_MASK")
-                                                 vti.RegClass:$merge,
-                                                 vti.RegClass:$rs1,
-                                                 (NegImm simm5_plus1:$rs2),
-                                                 (vti.Mask V0),
-                                                 GPR:$vl,
-                                                 vti.Log2SEW,
-                                                 (XLenVT timm:$policy))>;
+  let Predicates = GetVTypePredicates<vti>.Predicates in {
+    def : Pat<(vti.Vector (int_riscv_vrsub (vti.Vector (undef)),
+                                           (vti.Vector vti.RegClass:$rs2),
+                                           (vti.Vector vti.RegClass:$rs1),
+                                           VLOpFrag)),
+              (!cast("PseudoVSUB_VV_"#vti.LMul.MX) vti.RegClass:$rs1,
+                                                   vti.RegClass:$rs2,
+                                                   GPR:$vl,
+                                                   vti.Log2SEW)>;
+    def : Pat<(vti.Vector (int_riscv_vrsub (vti.Vector vti.RegClass:$merge),
+                                           (vti.Vector vti.RegClass:$rs2),
+                                           (vti.Vector vti.RegClass:$rs1),
+                                           VLOpFrag)),
+              (!cast("PseudoVSUB_VV_"#vti.LMul.MX#"_TU")
+                                                   vti.RegClass:$merge,
+                                                   vti.RegClass:$rs1,
+                                                   vti.RegClass:$rs2,
+                                                   GPR:$vl,
+                                                   vti.Log2SEW)>;
+    def : Pat<(vti.Vector (int_riscv_vrsub_mask (vti.Vector vti.RegClass:$merge),
+                                                (vti.Vector vti.RegClass:$rs2),
+                                                (vti.Vector vti.RegClass:$rs1),
+                                                (vti.Mask V0),
+                                                VLOpFrag,
+                                                (XLenVT timm:$policy))),
+              (!cast("PseudoVSUB_VV_"#vti.LMul.MX#"_MASK")
+                                                   vti.RegClass:$merge,
+                                                   vti.RegClass:$rs1,
+                                                   vti.RegClass:$rs2,
+                                                   (vti.Mask V0),
+                                                   GPR:$vl,
+                                                   vti.Log2SEW,
+                                                   (XLenVT timm:$policy))>;
+
+    // Match VSUB with a small immediate to vadd.vi by negating the immediate.
+    def : Pat<(vti.Vector (int_riscv_vsub (vti.Vector (undef)),
+                                          (vti.Vector vti.RegClass:$rs1),
+                                          (vti.Scalar simm5_plus1:$rs2),
+                                          VLOpFrag)),
+              (!cast("PseudoVADD_VI_"#vti.LMul.MX) vti.RegClass:$rs1,
+                                                   (NegImm simm5_plus1:$rs2),
+                                                   GPR:$vl,
+                                                   vti.Log2SEW)>;
+    def : Pat<(vti.Vector (int_riscv_vsub_mask (vti.Vector vti.RegClass:$merge),
+                                               (vti.Vector vti.RegClass:$rs1),
+                                               (vti.Scalar simm5_plus1:$rs2),
+                                               (vti.Mask V0),
+                                               VLOpFrag,
+                                               (XLenVT timm:$policy))),
+              (!cast("PseudoVADD_VI_"#vti.LMul.MX#"_MASK")
+                                                   vti.RegClass:$merge,
+                                                   vti.RegClass:$rs1,
+                                                   (NegImm simm5_plus1:$rs2),
+                                                   (vti.Mask V0),
+                                                   GPR:$vl,
+                                                   vti.Log2SEW,
+                                                   (XLenVT timm:$policy))>;
+  }
 }

//===----------------------------------------------------------------------===//
@@ -6191,7 +6272,6 @@ defm PseudoVCOMPRESS : VPseudoVCPR_V;
 // 11. Vector Integer Arithmetic Instructions
 //===----------------------------------------------------------------------===//

-let Predicates = [HasVInstructions] in {
 //===----------------------------------------------------------------------===//
 // 11.1. Vector Single-Width Integer Add and Subtract
 //===----------------------------------------------------------------------===//
@@ -6257,27 +6337,29 @@ defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsra", "PseudoVSRA", AllIntegerVectors,
 foreach vti = AllIntegerVectors in {
   // Emit shift by 1 as an add since it might be faster.
-  def : Pat<(vti.Vector (int_riscv_vsll (vti.Vector undef),
-                                        (vti.Vector vti.RegClass:$rs1),
-                                        (XLenVT 1), VLOpFrag)),
-            (!cast("PseudoVADD_VV_"#vti.LMul.MX) vti.RegClass:$rs1,
-                                                 vti.RegClass:$rs1,
-                                                 GPR:$vl,
-                                                 vti.Log2SEW)>;
-  def : Pat<(vti.Vector (int_riscv_vsll_mask (vti.Vector vti.RegClass:$merge),
-                                             (vti.Vector vti.RegClass:$rs1),
-                                             (XLenVT 1),
-                                             (vti.Mask V0),
-                                             VLOpFrag,
-                                             (XLenVT timm:$policy))),
-            (!cast("PseudoVADD_VV_"#vti.LMul.MX#"_MASK")
-                                                 vti.RegClass:$merge,
-                                                 vti.RegClass:$rs1,
-                                                 vti.RegClass:$rs1,
-                                                 (vti.Mask V0),
-                                                 GPR:$vl,
-                                                 vti.Log2SEW,
-                                                 (XLenVT timm:$policy))>;
+  let Predicates = GetVTypePredicates<vti>.Predicates in {
+    def : Pat<(vti.Vector (int_riscv_vsll (vti.Vector undef),
+                                          (vti.Vector vti.RegClass:$rs1),
+                                          (XLenVT 1), VLOpFrag)),
+              (!cast("PseudoVADD_VV_"#vti.LMul.MX) vti.RegClass:$rs1,
+                                                   vti.RegClass:$rs1,
+                                                   GPR:$vl,
+                                                   vti.Log2SEW)>;
+    def : Pat<(vti.Vector (int_riscv_vsll_mask (vti.Vector vti.RegClass:$merge),
+                                               (vti.Vector vti.RegClass:$rs1),
+                                               (XLenVT 1),
+                                               (vti.Mask V0),
+                                               VLOpFrag,
+                                               (XLenVT timm:$policy))),
+              (!cast("PseudoVADD_VV_"#vti.LMul.MX#"_MASK")
+                                                   vti.RegClass:$merge,
+                                                   vti.RegClass:$rs1,
+                                                   vti.RegClass:$rs1,
+                                                   (vti.Mask V0),
+                                                   GPR:$vl,
+                                                   vti.Log2SEW,
+                                                   (XLenVT timm:$policy))>;
+  }
 }

//===----------------------------------------------------------------------===//
@@ -6329,9 +6411,26 @@ defm : VPatBinaryV_VV_VX<"int_riscv_vmax", "PseudoVMAX", AllIntegerVectors>;
 // 11.10. Vector Single-Width Integer Multiply Instructions
 //===----------------------------------------------------------------------===//
 defm : VPatBinaryV_VV_VX<"int_riscv_vmul", "PseudoVMUL", AllIntegerVectors>;
-defm : VPatBinaryV_VV_VX<"int_riscv_vmulh", "PseudoVMULH", AllIntegerVectors>;
-defm : VPatBinaryV_VV_VX<"int_riscv_vmulhu", "PseudoVMULHU", AllIntegerVectors>;
-defm : VPatBinaryV_VV_VX<"int_riscv_vmulhsu", "PseudoVMULHSU", AllIntegerVectors>;
+
+defvar IntegerVectorsExceptI64 = !filter(vti, AllIntegerVectors,
+                                         !ne(vti.SEW, 64));
+defm : VPatBinaryV_VV_VX<"int_riscv_vmulh", "PseudoVMULH",
+                         IntegerVectorsExceptI64>;
+defm : VPatBinaryV_VV_VX<"int_riscv_vmulhu", "PseudoVMULHU",
+                         IntegerVectorsExceptI64>;
+defm : VPatBinaryV_VV_VX<"int_riscv_vmulhsu", "PseudoVMULHSU",
+                         IntegerVectorsExceptI64>;
+
+// vmulh, vmulhu, vmulhsu are not included for EEW=64 in Zve64*.
+defvar I64IntegerVectors = !filter(vti, AllIntegerVectors, !eq(vti.SEW, 64));
+let Predicates = [HasVInstructionsFullMultiply] in {
+  defm : VPatBinaryV_VV_VX<"int_riscv_vmulh", "PseudoVMULH",
+                           I64IntegerVectors>;
+  defm : VPatBinaryV_VV_VX<"int_riscv_vmulhu", "PseudoVMULHU",
+                           I64IntegerVectors>;
+  defm : VPatBinaryV_VV_VX<"int_riscv_vmulhsu", "PseudoVMULHSU",
+                           I64IntegerVectors>;
+}

 //===----------------------------------------------------------------------===//
 // 11.11. Vector Integer Divide Instructions
@@ -6373,18 +6472,20 @@ defm : VPatBinaryV_VM_XM_IM<"int_riscv_vmerge", "PseudoVMERGE">;
 // 11.16. Vector Integer Move Instructions
 //===----------------------------------------------------------------------===//
 foreach vti = AllVectors in {
-  def : Pat<(vti.Vector (int_riscv_vmv_v_v (vti.Vector undef),
-                                            (vti.Vector vti.RegClass:$rs1),
-                                            VLOpFrag)),
-            (!cast("PseudoVMV_V_V_"#vti.LMul.MX)
-             $rs1, GPR:$vl, vti.Log2SEW)>;
-  def : Pat<(vti.Vector (int_riscv_vmv_v_v (vti.Vector vti.RegClass:$passthru),
-                                            (vti.Vector vti.RegClass:$rs1),
-                                            VLOpFrag)),
-            (!cast("PseudoVMV_V_V_"#vti.LMul.MX#"_TU")
-             $passthru, $rs1, GPR:$vl, vti.Log2SEW)>;
-
-  // vmv.v.x/vmv.v.i are handled in RISCInstrVInstrInfoVVLPatterns.td
+  let Predicates = GetVTypePredicates<vti>.Predicates in {
+    def : Pat<(vti.Vector (int_riscv_vmv_v_v (vti.Vector undef),
+                                              (vti.Vector vti.RegClass:$rs1),
+                                              VLOpFrag)),
+              (!cast("PseudoVMV_V_V_"#vti.LMul.MX)
+               $rs1, GPR:$vl, vti.Log2SEW)>;
+    def : Pat<(vti.Vector (int_riscv_vmv_v_v (vti.Vector vti.RegClass:$passthru),
+                                              (vti.Vector vti.RegClass:$rs1),
+                                              VLOpFrag)),
+              (!cast("PseudoVMV_V_V_"#vti.LMul.MX#"_TU")
+               $passthru, $rs1, GPR:$vl, vti.Log2SEW)>;
+
+    // vmv.v.x/vmv.v.i are handled in RISCInstrVInstrInfoVVLPatterns.td
+  }
 }

//===----------------------------------------------------------------------===//
@@ -6410,7 +6511,10 @@ defm : VPatBinaryV_VV_VX<"int_riscv_vasub", "PseudoVASUB", AllIntegerVectors>;
 //===----------------------------------------------------------------------===//
 // 12.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
 //===----------------------------------------------------------------------===//
-defm : VPatBinaryV_VV_VX<"int_riscv_vsmul", "PseudoVSMUL", AllIntegerVectors>;
+defm : VPatBinaryV_VV_VX<"int_riscv_vsmul", "PseudoVSMUL", IntegerVectorsExceptI64>;
+// vsmul.vv and vsmul.vx are not included for EEW=64 in Zve64*.
+let Predicates = [HasVInstructionsFullMultiply] in
+defm : VPatBinaryV_VV_VX<"int_riscv_vsmul", "PseudoVSMUL", I64IntegerVectors>;

 //===----------------------------------------------------------------------===//
 // 12.4. Vector Single-Width Scaling Shift Instructions
@@ -6426,13 +6530,10 @@ defm : VPatBinaryV_VV_VX_VI<"int_riscv_vssra", "PseudoVSSRA", AllIntegerVectors,
 defm : VPatBinaryV_WV_WX_WI<"int_riscv_vnclipu", "PseudoVNCLIPU", AllWidenableIntVectors>;
 defm : VPatBinaryV_WV_WX_WI<"int_riscv_vnclip", "PseudoVNCLIP", AllWidenableIntVectors>;

-} // Predicates = [HasVInstructions]
-
 //===----------------------------------------------------------------------===//
 // 13. Vector Floating-Point Instructions
 //===----------------------------------------------------------------------===//

-let Predicates = [HasVInstructionsAnyF] in {
 //===----------------------------------------------------------------------===//
 // 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions
 //===----------------------------------------------------------------------===//
@@ -6540,12 +6641,14 @@ defm : VPatBinaryV_XM_TAIL<"int_riscv_vfmerge", "PseudoVFMERGE",
 foreach fvti = AllFloatVectors in {
   defvar instr = !cast("PseudoVMERGE_VIM_"#fvti.LMul.MX);
+  let Predicates = GetVTypePredicates<fvti>.Predicates in
   def : Pat<(fvti.Vector (int_riscv_vfmerge (fvti.Vector undef),
                                             (fvti.Vector fvti.RegClass:$rs2),
                                             (fvti.Scalar (fpimm0)),
                                             (fvti.Mask V0), VLOpFrag)),
             (instr fvti.RegClass:$rs2, 0, (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>;
   defvar instr_tu = !cast("PseudoVMERGE_VIM_"#fvti.LMul.MX#"_TU");
+  let Predicates = GetVTypePredicates<fvti>.Predicates in
   def : Pat<(fvti.Vector (int_riscv_vfmerge (fvti.Vector fvti.RegClass:$merge),
                                             (fvti.Vector fvti.RegClass:$rs2),
                                             (fvti.Scalar (fpimm0)),
@@ -6586,13 +6689,11 @@ defm : VPatConversionVF_WI <"int_riscv_vfncvt_f_xu_w", "PseudoVFNCVT_F_XU">;
 defm : VPatConversionVF_WI <"int_riscv_vfncvt_f_x_w", "PseudoVFNCVT_F_X">;
 defm : VPatConversionVF_WF<"int_riscv_vfncvt_f_f_w", "PseudoVFNCVT_F_F">;
 defm : VPatConversionVF_WF<"int_riscv_vfncvt_rod_f_f_w", "PseudoVFNCVT_ROD_F_F">;
-} // Predicates = [HasVInstructionsAnyF]

 //===----------------------------------------------------------------------===//
 // 14. Vector Reduction Operations
 //===----------------------------------------------------------------------===//

-let Predicates = [HasVInstructions] in {
 //===----------------------------------------------------------------------===//
 // 14.1. Vector Single-Width Integer Reduction Instructions
 //===----------------------------------------------------------------------===//
@@ -6610,9 +6711,7 @@ defm : VPatReductionV_VS<"int_riscv_vredmax", "PseudoVREDMAX">;
 //===----------------------------------------------------------------------===//
 defm : VPatReductionW_VS<"int_riscv_vwredsumu", "PseudoVWREDSUMU">;
 defm : VPatReductionW_VS<"int_riscv_vwredsum", "PseudoVWREDSUM">;
-} // Predicates = [HasVInstructions]

-let Predicates = [HasVInstructionsAnyF] in {
 //===----------------------------------------------------------------------===//
 // 14.3. Vector Single-Width Floating-Point Reduction Instructions
 //===----------------------------------------------------------------------===//
@@ -6627,13 +6726,10 @@ defm : VPatReductionV_VS<"int_riscv_vfredmax", "PseudoVFREDMAX", /*IsFloat=*/1>;
 defm : VPatReductionW_VS<"int_riscv_vfwredusum", "PseudoVFWREDUSUM", /*IsFloat=*/1>;
 defm : VPatReductionW_VS<"int_riscv_vfwredosum", "PseudoVFWREDOSUM", /*IsFloat=*/1>;

-} // Predicates = [HasVInstructionsAnyF]
-
 //===----------------------------------------------------------------------===//
 // 15. Vector Mask Instructions
 //===----------------------------------------------------------------------===//

-let Predicates = [HasVInstructions] in {
 //===----------------------------------------------------------------------===//
 // 15.1 Vector Mask-Register Logical Instructions
 //===----------------------------------------------------------------------===//
@@ -6685,7 +6781,6 @@ defm : VPatUnaryV_M<"int_riscv_viota", "PseudoVIOTA">;
 //===----------------------------------------------------------------------===//
 defm : VPatNullaryV<"int_riscv_vid", "PseudoVID">;

-} // Predicates = [HasVInstructions]

 //===----------------------------------------------------------------------===//
 // 16. Vector Permutation Instructions
 //===----------------------------------------------------------------------===//

 //===----------------------------------------------------------------------===//
 // 16.1. Integer Scalar Move Instructions
 //===----------------------------------------------------------------------===//

-let Predicates = [HasVInstructions] in {
 foreach vti = AllIntegerVectors in {
+  let Predicates = GetVTypePredicates<vti>.Predicates in
   def : Pat<(riscv_vmv_x_s (vti.Vector vti.RegClass:$rs2)),
             (!cast("PseudoVMV_X_S_" # vti.LMul.MX) $rs2, vti.Log2SEW)>;
   // vmv.s.x is handled with a custom node in RISCVInstrInfoVVLPatterns.td
 }
-} // Predicates = [HasVInstructions]

 //===----------------------------------------------------------------------===//
 // 16.2. Floating-Point Scalar Move Instructions
 //===----------------------------------------------------------------------===//

-let Predicates = [HasVInstructionsAnyF] in {
 foreach fvti = AllFloatVectors in {
-  def : Pat<(fvti.Vector (int_riscv_vfmv_s_f (fvti.Vector fvti.RegClass:$rs1),
-                                              (fvti.Scalar fvti.ScalarRegClass:$rs2), VLOpFrag)),
-            (!cast("PseudoVFMV_S_"#fvti.ScalarSuffix#"_" #
-                   fvti.LMul.MX)
-             (fvti.Vector $rs1),
-             (fvti.Scalar fvti.ScalarRegClass:$rs2),
-             GPR:$vl, fvti.Log2SEW)>;
-
-  def : Pat<(fvti.Vector (int_riscv_vfmv_s_f (fvti.Vector fvti.RegClass:$rs1),
-                                              (fvti.Scalar (fpimm0)), VLOpFrag)),
-            (!cast("PseudoVMV_S_X_" # fvti.LMul.MX)
-             (fvti.Vector $rs1), X0, GPR:$vl, fvti.Log2SEW)>;
+  let Predicates = GetVTypePredicates<fvti>.Predicates in {
+    def : Pat<(fvti.Vector (int_riscv_vfmv_s_f (fvti.Vector fvti.RegClass:$rs1),
+                                                (fvti.Scalar fvti.ScalarRegClass:$rs2), VLOpFrag)),
+              (!cast("PseudoVFMV_S_"#fvti.ScalarSuffix#"_" #
+                     fvti.LMul.MX)
+               (fvti.Vector $rs1),
+               (fvti.Scalar fvti.ScalarRegClass:$rs2),
+               GPR:$vl, fvti.Log2SEW)>;
+
+    def : Pat<(fvti.Vector (int_riscv_vfmv_s_f (fvti.Vector fvti.RegClass:$rs1),
+                                                (fvti.Scalar (fpimm0)), VLOpFrag)),
+              (!cast("PseudoVMV_S_X_" # fvti.LMul.MX)
+               (fvti.Vector $rs1), X0, GPR:$vl, fvti.Log2SEW)>;
+  }
 }
-} // Predicates = [HasVInstructionsAnyF]

 //===----------------------------------------------------------------------===//
 // 16.3. Vector Slide Instructions
 //===----------------------------------------------------------------------===//

-let Predicates = [HasVInstructions] in {
-  defm : VPatTernaryV_VX_VI<"int_riscv_vslideup", "PseudoVSLIDEUP", AllIntegerVectors, uimm5>;
-  defm : VPatTernaryV_VX_VI<"int_riscv_vslidedown", "PseudoVSLIDEDOWN", AllIntegerVectors, uimm5>;
-  defm : VPatBinaryV_VX<"int_riscv_vslide1up", "PseudoVSLIDE1UP", AllIntegerVectors>;
-  defm : VPatBinaryV_VX<"int_riscv_vslide1down", "PseudoVSLIDE1DOWN", AllIntegerVectors>;
-} // Predicates = [HasVInstructions]
+defm : VPatTernaryV_VX_VI<"int_riscv_vslideup", "PseudoVSLIDEUP", AllIntegerVectors, uimm5>;
+defm : VPatTernaryV_VX_VI<"int_riscv_vslidedown", "PseudoVSLIDEDOWN", AllIntegerVectors, uimm5>;
+defm : VPatBinaryV_VX<"int_riscv_vslide1up", "PseudoVSLIDE1UP", AllIntegerVectors>;
+defm : VPatBinaryV_VX<"int_riscv_vslide1down", "PseudoVSLIDE1DOWN", AllIntegerVectors>;

-let Predicates = [HasVInstructionsAnyF] in {
-  defm : VPatTernaryV_VX_VI<"int_riscv_vslideup", "PseudoVSLIDEUP", AllFloatVectors, uimm5>;
-  defm : VPatTernaryV_VX_VI<"int_riscv_vslidedown", "PseudoVSLIDEDOWN", AllFloatVectors, uimm5>;
-  defm : VPatBinaryV_VX<"int_riscv_vfslide1up", "PseudoVFSLIDE1UP", AllFloatVectors>;
-  defm : VPatBinaryV_VX<"int_riscv_vfslide1down", "PseudoVFSLIDE1DOWN", AllFloatVectors>;
-} // Predicates = [HasVInstructionsAnyF]
+defm : VPatTernaryV_VX_VI<"int_riscv_vslideup", "PseudoVSLIDEUP", AllFloatVectors, uimm5>;
+defm : VPatTernaryV_VX_VI<"int_riscv_vslidedown", "PseudoVSLIDEDOWN", AllFloatVectors, uimm5>;
+defm : VPatBinaryV_VX<"int_riscv_vfslide1up", "PseudoVFSLIDE1UP", AllFloatVectors>;
+defm : VPatBinaryV_VX<"int_riscv_vfslide1down", "PseudoVFSLIDE1DOWN", AllFloatVectors>;

 //===----------------------------------------------------------------------===//
 // 16.4. Vector Register Gather Instructions
 //===----------------------------------------------------------------------===//

-let Predicates = [HasVInstructions] in {
-  defm : VPatBinaryV_VV_VX_VI_INT<"int_riscv_vrgather", "PseudoVRGATHER",
-                                  AllIntegerVectors, uimm5>;
-  defm : VPatBinaryV_VV_INT_E_EEW<"int_riscv_vrgatherei16_vv", "PseudoVRGATHEREI16",
+defm : VPatBinaryV_VV_VX_VI_INT<"int_riscv_vrgather", "PseudoVRGATHER",
+                                AllIntegerVectors, uimm5>;
+defm : VPatBinaryV_VV_INT_E_EEW<"int_riscv_vrgatherei16_vv", "PseudoVRGATHEREI16",
                                 /* eew */ 16, AllIntegerVectors>;
-} // Predicates = [HasVInstructions]

-let Predicates = [HasVInstructionsAnyF] in {
-  defm : VPatBinaryV_VV_VX_VI_INT<"int_riscv_vrgather", "PseudoVRGATHER",
-                                  AllFloatVectors, uimm5>;
-  defm : VPatBinaryV_VV_INT_E_EEW<"int_riscv_vrgatherei16_vv", "PseudoVRGATHEREI16",
+defm : VPatBinaryV_VV_VX_VI_INT<"int_riscv_vrgather", "PseudoVRGATHER",
+                                AllFloatVectors, uimm5>;
+defm : VPatBinaryV_VV_INT_E_EEW<"int_riscv_vrgatherei16_vv", "PseudoVRGATHEREI16",
                                 /* eew */ 16, AllFloatVectors>;
-} // Predicates = [HasVInstructionsAnyF]

 //===----------------------------------------------------------------------===//
 // 16.5. Vector Compress Instruction
 //===----------------------------------------------------------------------===//
defm : VPatUnaryV_V_AnyMask_E<"int_riscv_vcompress", "PseudoVCOMPRESS", AllIntegerVectors>;
defm : VPatUnaryV_V_AnyMask_E<"int_riscv_vcompress", "PseudoVCOMPRESS", AllFloatVectors>;

 // Include the non-intrinsic ISel patterns
 include "RISCVInstrInfoVVLPatterns.td"
diff --git a/llvm/lib/Target/RISCV/RISCVSubtarget.h b/llvm/lib/Target/RISCV/RISCVSubtarget.h
index 28fe78f..5e57328 100644
--- a/llvm/lib/Target/RISCV/RISCVSubtarget.h
+++ b/llvm/lib/Target/RISCV/RISCVSubtarget.h
@@ -165,6 +165,7 @@ public:
   bool hasVInstructionsF64() const { return HasStdExtZve64d && HasStdExtD; }
   // F16 and F64 both require F32.
   bool hasVInstructionsAnyF() const { return hasVInstructionsF32(); }
+  bool hasVInstructionsFullMultiply() const { return HasStdExtV; }
   unsigned getMaxInterleaveFactor() const {
     return hasVInstructions() ? MaxInterleaveFactor : 1;
   }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul.ll b/llvm/test/CodeGen/RISCV/rvv/vmul.ll
index 7403784..91d255f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmul.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmul.ll
@@ -3,6 +3,11 @@
 ; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
 ; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+zve64d \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+zve64d \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
+
 declare @llvm.riscv.vmul.nxv1i8.nxv1i8( , ,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulh.ll b/llvm/test/CodeGen/RISCV/rvv/vmulh.ll
index dd50aa4..e6e6085 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmulh.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmulh.ll
@@ -3,6 +3,13 @@
 ; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
 ; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
+; RUN: sed 's/iXLen/i32/g' %s | not --crash llc -mtriple=riscv32 \
+; RUN:   -mattr=+zve64d 2>&1 | FileCheck %s --check-prefixes=ZVE64D
+; RUN: sed 's/iXLen/i64/g' %s | not --crash llc -mtriple=riscv64 \
+; RUN:   -mattr=+zve64d 2>&1 | FileCheck %s --check-prefixes=ZVE64D
+
+; ZVE64D: LLVM ERROR: Cannot select: intrinsic %llvm.riscv.vmulh
+
 declare @llvm.riscv.vmulh.nxv1i8.nxv1i8( , ,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhsu.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhsu.ll
index 13cfcc3..4275247 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmulhsu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmulhsu.ll
@@ -3,6 +3,13 @@
 ; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
 ; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
+; RUN: sed 's/iXLen/i32/g' %s | not --crash llc -mtriple=riscv32 \
+; RUN:   -mattr=+zve64d 2>&1 | FileCheck %s --check-prefixes=ZVE64D
+; RUN: sed 's/iXLen/i64/g' %s | not --crash llc -mtriple=riscv64 \
+; RUN:   -mattr=+zve64d 2>&1 | FileCheck %s --check-prefixes=ZVE64D
+
+; ZVE64D: LLVM ERROR: Cannot select: intrinsic %llvm.riscv.vmulhsu
+
 declare @llvm.riscv.vmulhsu.nxv1i8.nxv1i8( , ,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhu.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhu.ll
index 18bf8fb..6d50959 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmulhu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmulhu.ll
@@ -3,6 +3,13 @@
 ; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
 ; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
+; RUN: sed 's/iXLen/i32/g' %s | not --crash llc -mtriple=riscv32 \
+; RUN:   -mattr=+zve64d 2>&1 | FileCheck %s --check-prefixes=ZVE64D
+; RUN: sed 's/iXLen/i64/g' %s | not --crash llc -mtriple=riscv64 \
+; RUN:   -mattr=+zve64d 2>&1 | FileCheck %s --check-prefixes=ZVE64D
+
+; ZVE64D: LLVM ERROR: Cannot select: intrinsic %llvm.riscv.vmulhu
+
 declare @llvm.riscv.vmulhu.nxv1i8.nxv1i8( , ,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll
index 95eedb4..cf528ca 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll
@@ -1,6 +1,11 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
+; RUN: not --crash llc -mtriple=riscv32 -mattr=+zve64d 2>&1 \
+; RUN:   < %s | FileCheck %s --check-prefixes=ZVE64D
+
+; ZVE64D: LLVM ERROR: Cannot select: intrinsic %llvm.riscv.vsmul
+
 declare @llvm.riscv.vsmul.nxv1i8.nxv1i8( , ,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll
index 8ac6126..df6cab7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll
@@ -1,6 +1,11 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
+; RUN: not --crash llc -mtriple=riscv64 -mattr=+zve64d 2>&1 \
+; RUN:   < %s | FileCheck %s --check-prefixes=ZVE64D
+
+; ZVE64D: LLVM ERROR: Cannot select: intrinsic %llvm.riscv.vsmul
+
 declare @llvm.riscv.vsmul.nxv1i8.nxv1i8( , ,
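; Editorial sketch (hypothetical reproducer, not part of the patch) of what the
; new ZVE64D RUN lines verify: with only Zve64d enabled, the EEW=64 form of
; vsmul has no selectable pattern, because it now requires
; HasVInstructionsFullMultiply (the full 'V' extension), so llc fails with the
; "Cannot select" error checked above. This assumes the pre-vxrm intrinsic
; signature used in these tests (passthru, two operands, vl).
;
;   llc -mtriple=riscv64 -mattr=+zve64d repro.ll
;
; declare <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64(
;   <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64)
;
; define <vscale x 1 x i64> @repro(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, i64 %vl) {
;   %r = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64(
;            <vscale x 1 x i64> undef, <vscale x 1 x i64> %a, <vscale x 1 x i64> %b, i64 %vl)
;   ret <vscale x 1 x i64> %r
; }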